| prompt | completion | api |
|---|---|---|
| string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__all__ = ['load_data', 'shape_shower', 'location_max_finder', 'differentiate', 'intensity_direction_shower', 'write_data', 'mix_pics']
import io
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# prevent the Chinese text from being garbled on output
SIDE_LENGTH = 100
N = 256
row = np.linspace(0, SIDE_LENGTH, N).tolist()
rows = np.array([row] * N)
cols = rows.T
def load_data(address, sheetname=0, header=None):
"""
Load the data from the attachment (Excel file).
"""
return pd.read_excel(address, sheet_name=sheetname, header=header).values
def write_data(address, data, sheetname='page_1'):
"""
Write the data to an Excel file.
"""
data_df = pd.DataFrame(data)
writer = pd.ExcelWriter(address)
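# A plausible completion sketch (assumed, not part of the original snippet):
# write the frame to the requested sheet and flush the workbook to disk.
data_df.to_excel(writer, sheet_name=sheetname, index=False, header=False)
writer.close()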
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
import requests
UTAHAQ_API_BASE_URI = 'http://meso2.chpc.utah.edu/aq/cgi-bin/download_mobile_archive.cgi'
UTAHAQ_API_TOKEN = os.getenv('UTAHAQ_API_TOKEN')
def _utahaq_batch_get(stid: str,
yr: int,
mo: int,
datatype: str) -> pd.DataFrame:
"""Queries UtahAQ API endpoint for single month of data
For API reference, see
http://utahaq.chpc.utah.edu/aq/cgi-bin/mobile_archive.cgi
Args:
stid: unique station identifier
yr: year desired
mo: month desired
datatype: measurement dataset identifier, see reference
Returns:
pd.DataFrame: flattened time, stid, lat/lon, and relevant readings
"""
yr = str(yr).zfill(4)
mo = str(mo).zfill(2)
stid = stid.upper()
datatype = datatype.lower()
uri = (
f'{UTAHAQ_API_BASE_URI}'
f'?accesskey={UTAHAQ_API_TOKEN}'
f'&stid={stid}'
f'&yr={yr}'
f'&mo={mo}'
f'&datatype={datatype}'
)
try:
res = pd.read_csv(uri, skiprows=True)
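# A plausible continuation sketch (assumed, not part of the original snippet):
# surface request/parse failures clearly and return the flattened readings.
except Exception as err:
    raise RuntimeError(f'UtahAQ query failed for {stid} {yr}-{mo}') from err
return res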
__author__ = '<NAME>'
__email__ = '<EMAIL>'
########################################
# imports
########################################
import networkx as nx
from tqdm.autonotebook import tqdm
import pandas as pd
from itertools import product
########################################
# Feature Extractor
########################################
class FeatureExtractor:
def __init__(self, g):
self._g = g
########################################
# edge topological features
########################################
def _friends_measure(self, neighborhood_1: set, neighborhood_2: set):
"""
Returns the friend measure.
Counter adds 1 for:
each vertex which is shared by both neighborhoods,
each edge existing between the neighborhoods
"""
# instantiate a counter
output = 0
# count each shared vertex and inter-neighborhoods edges
for u, v in product(neighborhood_1, neighborhood_2):
    # a shared vertex (u == v) counts, as does an edge between the two neighborhoods
    if u == v or (u, v) in self._g.edges:
        output += 1
return output
def _get_edge_topological_features(self, u, v):
"""
Returns a dictionary containing edge (u, v) topological features.
:param u: one of the edge's vertices.
:param v: one of the edge's vertices.
:return: a dictionary.
"""
# If edge exists, remove it and maintain a boolean to add it back later
edge_removed = False
if self._g.has_edge(u, v):
self._g.remove_edge(u, v)
edge_removed = True
# vertices' neighborhoods
u_neighborhood = set(self._g.neighbors(u))
v_neighborhood = set(self._g.neighbors(v))
# vertices' degrees
u_deg = len(u_neighborhood)
v_deg = len(v_neighborhood)
# preferential attachment score
preferential_attachment_score = u_deg * v_deg
# friends measure
friends_measure = self._friends_measure(u_neighborhood, v_neighborhood)
# join vertices' neighborhoods
total_friends = len(u_neighborhood | v_neighborhood)
# shortest path
if nx.has_path(self._g, u, v):
shortest_path = len(nx.shortest_path(self._g, u, v)) - 1
else:
shortest_path = -1
# instantiate a dictionary to contain edge topological features
output_dict = {
'total_friends': total_friends,
'preferential_attachment_score': preferential_attachment_score,
'friends_measure': friends_measure,
'shortest_path': shortest_path,
'vertex_1_degree': u_deg,
'vertex_2_degree': v_deg
}
# if edge was removed, add it back
if edge_removed:
self._g.add_edge(u, v)
return output_dict
########################################
# edge lists topological features
########################################
def _get_all_topological_features(self, pos_edges: list, neg_edges: list):
"""
Iterates through two lists of edges and extracts their topological features.
Creates a dictionary of form {(u,v): {edge_features... , u features... , v features... }}.
:param pos_edges: a list of tuples, each indicating an existing edge.
:param neg_edges: a list of tuples, each indicating a non-existing edge.
:return: a dictionary.
"""
output = {}
print('\nExtracting positive edges features...\n')
for (u, v) in tqdm(pos_edges):
edge_dict = self._get_edge_topological_features(u, v)
edge_dict.update({'edge_exist': 1})
output[f'({u}, {v})'] = edge_dict
print('\nExtracting negative edges features...\n')
for (u, v) in tqdm(neg_edges):
edge_dict = self._get_edge_topological_features(u, v)
edge_dict.update({'edge_exist': 0})
output[f'({u}, {v})'] = edge_dict
return output
########################################
# create train and test sets of edges' topological features
########################################
def create_topological_features_df(
self, positive_edges: list, negative_edges: list, save: bool = False, save_dir_path: str = None):
"""
Extracts topological features of all given edge lists and returns as DataFrame.
Operates on a single graph.
One can provide both positive_edges list and negative_edges list or just positive edges.
"""
edges_dict = None
if negative_edges is not None and len(negative_edges) > 0:
edges_dict = self._get_all_topological_features(positive_edges, negative_edges)
elif negative_edges is None or len(negative_edges) == 0:
edges_dict = self._get_all_topological_features(positive_edges, [])
edges_df = pd.DataFrame.from_dict(edges_dict, orient='index')
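# A plausible remainder sketch (assumed, not part of the original snippet);
# the CSV file name below is a placeholder, not taken from the source.
if save and save_dir_path is not None:
    edges_df.to_csv(f'{save_dir_path}/topological_features.csv')
return edges_df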
import pandas as pd
import numpy as np
import pycountry_convert as pc
import pycountry
import os
from iso3166 import countries
PATH_AS_RELATIONSHIPS = '../Datasets/AS-relationships/20210701.as-rel2.txt'
NODE2VEC_EMBEDDINGS = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
DEEPWALK_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/DeepWalk_128.csv'
DIFF2VEC_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Diff2Vec_128.csv'
NETMF_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NetMF_128.csv'
NODESKETCH_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/NodeSketch_128.csv'
WALKLETS_EMBEDDINGS_256 = '../Check_for_improvements/Embeddings/Walklets_256.csv'
NODE2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_embeddings.emb'
NODE2VEC_LOCAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_p2_64.csv'
NODE2VEC_GLOBAL_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Node2Vec_q2_64.csv'
DIFF2VEC_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/Diff2Vec_64.csv'
NETMF_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NetMF_64.csv'
NODESKETCH_EMBEDDINGS_64 = '../Check_for_improvements/Embeddings/NodeSketch_64.csv'
NODE2VEC_WL5_E3_LOCAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_local.csv'
NODE2VEC_WL5_E3_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_ep3_global.csv'
NODE2VEC_64_WL5_E1_GLOBAL = '../Check_for_improvements/Embeddings/Node2Vec_64_wl5_ws2_global.csv'
BGP2VEC_64 = '../Check_for_improvements/Embeddings/Node2Vec_bgp2Vec.csv'
BGP2VEC_32 = '../Check_for_improvements/Embeddings/BGP2VEC_32'
WALKLETS_EMBEDDINGS_128 = '../Check_for_improvements/Embeddings/Walklets_128.csv'
STORE_CSV_TO_FOLDER = '../Embeddings_Visualization/StorePreprocessedEmb'
def country_flag(data):
"""
:param data: a row of the dataframe that combines the 3 datasets; its 'AS_rank_iso' field holds a 2-letter country code
:return: the full name of the country matching the code, or 'Unknown Code' if no match is found
"""
list_alpha_2 = [i.alpha2 for i in list(countries)]
if data['AS_rank_iso'] in list_alpha_2:
return pycountry.countries.get(alpha_2=data['AS_rank_iso']).name
else:
return 'Unknown Code'
def country_to_continent(country_name):
"""
This function takes a country name as input and returns the continent to which the given country belongs.
:param country_name: Contains the name of a country
:return: The continent
"""
try:
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
return country_continent_name
except:
return np.nan
def convert_country_to_continent(data):
"""
Converts the ISO alpha-2 code (example: US) held in 'AS_rank_iso' to the full country name and then to its continent. Requires the iso3166 package.
:param data: Contains a dataframe combining 4 datasets
:return: The continent for each country
"""
data['AS_rank_iso'] = data.apply(country_flag, axis=1)
temp_list = []
for i in range(0, len(data)):
temp_list.append(country_to_continent(data['AS_rank_iso'][i]))
df = pd.DataFrame(temp_list, columns=['AS_rank_iso'])
data['AS_rank_iso'] = df['AS_rank_iso']
return data['AS_rank_iso']
def merge_datasets(final_df, embeddings_df):
"""
:param final_df: the dataset generated in the Analysis/aggregate_data folder
:param embeddings_df: Contains pretrained embeddings
:return: A new merged dataset (containing improvement_score and the embedding of each ASN)
"""
print(final_df['ASN'].isin(embeddings_df['ASN']).value_counts())
mergedStuff = pd.merge(embeddings_df, final_df, on=['ASN'], how='left')
mergedStuff.replace('', np.nan, inplace=True)
return mergedStuff
def get_path_and_filename(model, dimensions):
"""
:param model: The model's name
:param dimensions: The number of dimensions of the given model
:return: The path where the script will be stored and its name
"""
file_name = 'Preprocessed' + str(model) + str(dimensions) + f'.csv'
outdir = STORE_CSV_TO_FOLDER
if not os.path.exists(outdir):
os.mkdir(outdir)
full_name = os.path.join(outdir, file_name)
return full_name
def read_Node2Vec_embeddings_file():
"""
:return: A dataframe containing the ASNs and the embedding of each ASN, created with the Node2Vec algorithm.
"""
emb_df = pd.read_table(NODE2VEC_EMBEDDINGS, skiprows=1, header=None, sep=" ")
# name the columns
rng = range(0, 65)
new_cols = ['dim_' + str(i) for i in rng]
emb_df.columns = new_cols
# rename first column
emb_df.rename(columns={'dim_0': 'ASN'}, inplace=True)
return emb_df
def read_karateClub_embeddings_file(emb, dimensions):
"""
The KarateClub library requires nodes to be labelled with consecutive integers and returns the embeddings
in ascending node order, so this function reassigns each ASN to its own embedding.
:param emb: A dataset containing pretrained embeddings
:param dimensions: The dimensions of the given dataset
:return: A dataframe containing pretrained embeddings
"""
if dimensions == 64:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_64, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_64, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_64, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_128, sep=',')
elif emb == 'Node2Vec_Local':
df = pd.read_csv(NODE2VEC_LOCAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_Global':
df = pd.read_csv(NODE2VEC_GLOBAL_EMBEDDINGS_64, sep=',')
elif emb == 'Node2Vec_wl5_global':
df = pd.read_csv(NODE2VEC_64_WL5_E1_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_global':
df = pd.read_csv(NODE2VEC_WL5_E3_GLOBAL, sep=',')
elif emb == 'Node2Vec_wl5_e3_local':
df = pd.read_csv(NODE2VEC_WL5_E3_LOCAL, sep=',')
elif emb == 'bgp2vec_64':
df = pd.read_csv(BGP2VEC_64, sep=',')
elif emb == 'bgp2vec_32':
df = pd.read_csv(BGP2VEC_32, sep=',')
else:
raise Exception('Not defined dataset')
else:
if emb == 'Diff2Vec':
df = pd.read_csv(DIFF2VEC_EMBEDDINGS_128, sep=',')
elif emb == 'NetMF':
df = pd.read_csv(NETMF_EMBEDDINGS_128, sep=',')
elif emb == 'NodeSketch':
df = pd.read_csv(NODESKETCH_EMBEDDINGS_128, sep=',')
elif emb == 'Walklets':
df = pd.read_csv(WALKLETS_EMBEDDINGS_256, sep=',')
elif emb == 'DeepWalk':
df = pd.read_csv(DEEPWALK_EMBEDDINGS_128, sep=',')
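# A plausible remainder sketch (assumed, not part of the original snippet):
# close the dispatch like the 64-dimension branch does, then hand the frame
# back; the ASN reassignment described in the docstring would be applied to
# `df` before it is returned.
else:
    raise Exception('Not defined dataset')
return df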
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:30:17 2018
@author: avelinojaver
"""
from tierpsy.features.tierpsy_features.summary_stats import get_summary_stats
from tierpsy.summary.helper import augment_data, add_trajectory_info
from tierpsy.summary.filtering import filter_trajectories
from tierpsy.helper.params import read_fps, read_microns_per_pixel
from tierpsy.helper.misc import WLAB,print_flush
from tierpsy.analysis.split_fov.helper import was_fov_split
from tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter
import pandas as pd
import pdb
#%%
def time_to_frame_nb(time_windows,time_units,fps,timestamp,fname):
"""
Converts the time windows to units of frame numbers (if they were defined in seconds).
It also defines the end frame of a window, if the index is set to -1 (end).
"""
from copy import deepcopy
if timestamp.empty:
return
time_windows_frames = deepcopy(time_windows)
if time_units == 'seconds':
assert fps!=-1, 'Cannot convert time windows to frame numbers. Frames per second ratio not known.'
for iwin, win in enumerate(time_windows_frames):
for iinterval in range(len(win)):
for ilim in range(2):
if time_windows_frames[iwin][iinterval][ilim]!=-1:
time_windows_frames[iwin][iinterval][ilim] = \
round(time_windows_frames[iwin][iinterval][ilim]*fps)
last_frame = timestamp.sort_values().iloc[-1]
for iwin, win in enumerate(time_windows_frames):
for iinterval in range(len(win)):
# If a window ends with -1, replace with the frame number of the
# last frame (or the start frame of the window+1 if window out of bounds)
if time_windows_frames[iwin][iinterval][1]==-1:
time_windows_frames[iwin][iinterval][1] = \
max(last_frame+1, time_windows_frames[iwin][iinterval][0])
# If a window is out of bounds, print warning
if time_windows_frames[iwin][iinterval][0]>last_frame:
print_flush(
'Warning: The start time of interval '+
'{}/{} '.format(iinterval+1, len(win)) +
'of window {} '.format(iwin) +
'is out of bounds of file \'{}\'.'.format(fname))
return time_windows_frames
def no_attr_flush(attr, fname):
if attr=='fps':
out = ['seconds', 'frames_per_second', fname, 'frame numbers']
elif attr=='mpp':
out = ['microns', 'microns_per_pixel', fname, 'pixels']
print_flush(
"""
Warning: some of the summarizer inputs were given in {0}, but the {1}
ratio for file \'{2}\' is unknown. Give input in {3} instead.
""".format(*out)
)
return
def _no_fps(time_units, fps, fname):
if fps==-1:
if time_units=='seconds':
no_attr_flush('fps', fname)
return True
return False
def _match_units(filter_params, fps, fname):
"""
author: EM
The filtering thresholds must match the timeseries units. If the right
conversion is not possible, then check_ok is False, and the feature
summaries will not be calculated for this file.
"""
from copy import deepcopy
if filter_params is None:
return filter_params, True
all_units = filter_params['units']+[filter_params['time_units']]
cfilter_params = deepcopy(filter_params)
if fps==-1:
# In this case, all time-related timeseries will be in frames.
# If thresholds have been defined in seconds there is no way to convert.
if 'seconds' in all_units:
no_attr_flush('fps', fname)
return cfilter_params, False
else:
# In this case, all time-related timeseries will be in seconds.
# We always want the time_units for traj_length in frames
if filter_params['time_units']=='seconds' and \
filter_params['min_traj_length'] is not None:
cfilter_params['min_traj_length'] = \
filter_params['min_traj_length']*fps
# If the timeseries thresholds are defined in seconds, no conversion is
# necessary
# If the timeseries thresholds are defined in frames, we need to convert
# to seconds
if 'frame_numbers' in filter_params['units']:
ids = [i for i,x in enumerate(filter_params['units']) if x=='frame_numbers']
for i in ids:
if filter_params['min_thresholds'][i] is not None:
cfilter_params['min_thresholds'][i]= \
filter_params['min_thresholds'][i]/fps
if filter_params['max_thresholds'][i] is not None:
cfilter_params['max_thresholds'][i]= \
filter_params['max_thresholds'][i]/fps
mpp = read_microns_per_pixel(fname)
if mpp==-1:
# In this case, all distance-related timeseries will be in pixels.
# If thresholds have been defined in microns there is no way to convert.
if 'microns' in all_units:
no_attr_flush('mpp', fname)
return cfilter_params, False
else:
# In this case, all distance-related timeseries will be in microns.
# If the timeseries thresholds are defined in microns, no conversion is
# necessary
# If the timeseries thresholds are defined in pixels, we need to convert
# to microns
if filter_params['distance_units']=='pixels' and \
filter_params['min_distance_traveled'] is not None:
cfilter_params['min_distance_traveled'] = \
filter_params['min_distance_traveled']*mpp
if 'pixels' in filter_params['units']:
ids = [i for i,x in enumerate(filter_params['units']) if x=='pixels']
for i in ids:
if filter_params['min_thresholds'][i] is not None:
cfilter_params['min_thresholds'][i]= \
filter_params['min_thresholds'][i]*mpp
if filter_params['max_thresholds'][i] is not None:
cfilter_params['max_thresholds'][i]= \
filter_params['max_thresholds'][i]*mpp
return cfilter_params, True
#%%
def read_data(fname, filter_params, time_windows, time_units, fps, is_manual_index):
"""
Reads the timeseries_data and the blob_features for a given file within every time window.
return:
timeseries_data_list: list of timeseries_data for each time window (length of lists = number of windows)
blob_features_list: list of blob_features for each time window (length of lists = number of windows)
"""
import numpy as np
# EM: If time_units=seconds and fps is not defined, then return None with warning of no fps.
# Make this check here, to avoid wasting time reading the file
if _no_fps(time_units, fps, fname):
return
cfilter_params, check_ok = _match_units(filter_params, fps, fname)
if not check_ok:
return
with pd.HDFStore(fname, 'r') as fid:
timeseries_data = fid['/timeseries_data']
blob_features = fid['/blob_features']
if is_manual_index:
#keep only data labeled as worm or worm clusters
valid_labels = [WLAB[x] for x in ['WORM', 'WORMS']]
trajectories_data = fid['/trajectories_data']
if not 'worm_index_manual' in trajectories_data:
#no manual index, nothing to do here
return
good = trajectories_data['worm_label'].isin(valid_labels)
good = good & (trajectories_data['skeleton_id'] >= 0)
skel_id = trajectories_data['skeleton_id'][good]
timeseries_data = timeseries_data.loc[skel_id]
timeseries_data['worm_index'] = trajectories_data['worm_index_manual'][good].values
timeseries_data = timeseries_data.reset_index(drop=True)
blob_features = blob_features.loc[skel_id].reset_index(drop=True)
if timeseries_data.empty:
#no data, nothing to do here
return
# convert time windows to frame numbers for the given file
time_windows_frames = time_to_frame_nb(
time_windows, time_units, fps, timeseries_data['timestamp'], fname
)
# EM: Filter trajectories
if cfilter_params is not None:
timeseries_data, blob_features = \
filter_trajectories(timeseries_data, blob_features, **cfilter_params)
if timeseries_data.empty:
#no data, nothing to do here
return
# EM: extract the timeseries_data and blob_features corresponding to each
# time window and store them in a list (length of lists = number of windows)
timeseries_data_list = []
blob_features_list = []
for window in time_windows_frames:
in_window = []
for interval in window:
in_interval = (timeseries_data['timestamp']>=interval[0]) & \
(timeseries_data['timestamp']<interval[1])
in_window.append(in_interval.values)
in_window = np.any(in_window, axis=0)
timeseries_data_list.append(timeseries_data.loc[in_window, :].reset_index(drop=True))
blob_features_list.append(blob_features.loc[in_window].reset_index(drop=True))
return timeseries_data_list, blob_features_list
def count_skeletons(timeseries):
cols = [col for col in timeseries.columns if col.startswith('eigen')]
return (~timeseries[cols].isna().any(axis=1)).sum()
#%%
def tierpsy_plate_summary(
fname, filter_params, time_windows, time_units,
only_abs_ventral=False, selected_feat=None,
is_manual_index=False, delta_time=1/3):
"""
Calculate the plate summaries for a given file fname, within a given time window
(units of start time and end time are in frame numbers).
"""
fps = read_fps(fname)
data_in = read_data(
fname, filter_params, time_windows, time_units, fps, is_manual_index)
# if manual annotation was chosen and the trajectories_data does not contain
# worm_index_manual, then data_in is None
# if time_windows in seconds and fps is not defined (fps=-1), then data_in is None
if data_in is None:
return [pd.DataFrame() for iwin in range(len(time_windows))]
timeseries_data, blob_features = data_in
# was the fov split in wells? only use the first window to detect that,
# and to extract the list of well names
is_fov_tosplit = was_fov_split(fname)
# is_fov_tosplit = False
if is_fov_tosplit:
fovsplitter = FOVMultiWellsSplitter(fname)
good_wells_df = fovsplitter.wells[['well_name','is_good_well']].copy()
# print(good_wells_df)
# initialize list of plate summaries for all time windows
plate_feats_list = []
for iwin,window in enumerate(time_windows):
if is_fov_tosplit == False:
plate_feats = get_summary_stats(
timeseries_data[iwin], fps, blob_features[iwin], delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
)
plate_feats['n_skeletons'] = count_skeletons(timeseries_data[iwin])
plate_feats_list.append(pd.DataFrame(plate_feats).T)
else:
# get list of well names in this time window
# (maybe some wells looked empty during a whole window,
# this prevents errors later on)
well_names_list = list(set(timeseries_data[iwin]['well_name']) - set(['n/a']))
# create a list of well-specific, one-line long dataframes
well_feats_list = []
for well_name in well_names_list:
# find entries in timeseries_data[iwin] belonging to the right well
idx_well = timeseries_data[iwin]['well_name'] == well_name
well_feats = get_summary_stats(
timeseries_data[iwin][idx_well].reset_index(), fps,
blob_features[iwin][idx_well].reset_index(), delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
)
well_feats['n_skeletons'] = count_skeletons(timeseries_data[iwin][idx_well])
# first prepend the well_name_s to the well_feats series,
# then transpose it so it is a single-row dataframe,
# and append it to the well_feats_list
well_name_s = pd.Series({'well_name':well_name})
well_feats_list.append(pd.DataFrame(pd.concat([well_name_s,well_feats])).T)
# check: did we find any well?
if len(well_feats_list) == 0:
plate_feats_list.append(pd.DataFrame())
else:
# now concatenate all the single-row df in well_feats_list in a single df
# and append it to the growing list (1 entry = 1 window)
plate_feats = pd.concat(well_feats_list, ignore_index=True, sort=False)
# import pdb; pdb.set_trace()
plate_feats = plate_feats.merge(good_wells_df,
on='well_name',
how='left')
plate_feats_list.append(plate_feats)
return plate_feats_list
def tierpsy_trajectories_summary(
fname, filter_params, time_windows, time_units,
only_abs_ventral=False, selected_feat=None,
is_manual_index=False, delta_time=1/3):
"""
Calculate the trajectory summaries for a given file fname, within a given time window
(units of start time and end time are in frame numbers).
"""
fps = read_fps(fname)
data_in = read_data(
fname, filter_params, time_windows, time_units, fps, is_manual_index)
if data_in is None:
return [pd.DataFrame() for iwin in range(len(time_windows))]
timeseries_data, blob_features = data_in
is_fov_tosplit = was_fov_split(fname)
# is_fov_tosplit = False
if is_fov_tosplit:
fovsplitter = FOVMultiWellsSplitter(fname)
good_wells_df = fovsplitter.wells[['well_name','is_good_well']].copy()
# print(good_wells_df)
# initialize list of summaries for all time windows
all_summaries_list = []
# loop over time windows
for iwin,window in enumerate(time_windows):
if timeseries_data[iwin].empty:
all_summary = pd.DataFrame([])
else:
# initialize list of trajectory summaries for given time window
all_summary = []
# loop over worm indexes (individual trajectories)
for w_ind, w_ts_data in timeseries_data[iwin].groupby('worm_index'):
w_blobs = blob_features[iwin].loc[w_ts_data.index]
w_ts_data = w_ts_data.reset_index(drop=True)
w_blobs = w_blobs.reset_index(drop=True)
worm_feats = get_summary_stats(
w_ts_data, fps, w_blobs, delta_time,
only_abs_ventral=only_abs_ventral,
selected_feat=selected_feat
) # returns empty dataframe when w_ts_data is empty
worm_feats['n_skeletons'] = count_skeletons(w_ts_data)
worm_feats = pd.DataFrame(worm_feats)
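# A plausible continuation sketch (assumed, not part of the original snippet):
# keep one single-row frame per trajectory, tagged with its worm index, for
# concatenation into the per-window summary.
worm_feats = worm_feats.T
worm_feats['worm_index'] = w_ind
all_summary.append(worm_feats)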
from collections import namedtuple
from pathlib import Path
import logging
import numpy as np
import pandas as pd
import scipy
from . import (
ctd_plots,
get_ctdcal_config,
flagging,
process_ctd,
oxy_fitting,
)
cfg = get_ctdcal_config()
log = logging.getLogger(__name__)
RinkoO2Cal = namedtuple("RinkoO2Cal", [*"ABCDEFGH"])
RinkoTMPCal = namedtuple("RinkoTMPCal", [*"ABCD"])
def rinko_DO(p_prime, G, H):
"""
Calculates the dissolved oxygen percentage.
"""
DO = G + H * p_prime
return DO
def rinko_p_prime(N, t, A, B, C, D, E, F, G, H):
"""
Per RinkoIII manual: 'The film sensing the water is affect by environment
temperature and pressure at the depth where it is deployed. Based on experiments,
an empirical algorithm as following is used to correct data dissolved oxygen.'
Parameters
----------
N : array-like
Raw instrument output
t : array-like
Temperature [degC]
A-H : float
Calibration parameters
"""
p_prime = A / (1 + D * (t - 25)) + B / ((N - F) * (1 + D * (t - 25)) + C + F)
return p_prime
def correct_pressure(P, d, E):
"""
Parameters
----------
P : array-like
Temperature-corrected DO [%]
d : array-like
Pressure [MPa]
E : float
Manufacturer calibration coefficient
Returns
-------
P_d : array-like
Temperature- and pressure-corrected DO [%]
"""
# TODO: check d range to make sure it's MPa
# what is the dbar ~ MPa?
P_d = P * (1 + E * d)
return P_d
def salinity_correction(DO_c, T, S):
"""
Oxygen optode is not able to detect salinity, so a correction is applied to
account for the effect of salt on oxygen concentration. See Uchida (2010) in
GO-SHIP manual (pg. 6, eq. 9) for more info.
Parameters
----------
DO_c : array-like
Pressure-corrected dissolved oxygen
T : array-like
Calibrated CTD temperature
S : array-like
Calibrated CTD salinity
Returns
-------
DO_sc : array-like
Pressure- and salinity-corrected dissolved oxygen
"""
# solubility coefficients from <NAME> Krause (1984),
# as recommended by Garcia and Gordon (1992)
B0 = -6.24523e-3
B1 = -7.37614e-3
B2 = -1.03410e-2
B3 = -8.17083e-3
C0 = -4.88682e-7
# "scaled temperature"
T_scaled = np.log((298.15 - T) / (273.15 + T))
# correction equation
DO_sc = DO_c * np.exp(
S * (B0 + (B1 * T_scaled) + (B2 * T_scaled ** 2) + (B3 * T_scaled ** 3))
+ C0 * S ** 2
)
return DO_sc
def _Uchida_DO_eq(coefs, inputs):
"""
See Uchida et. al (2008) for more info:
https://doi.org/10.1175/2008JTECHO549.1
and Uchida et. al (2010) - GO-SHIP manual
Parameters
----------
coefs : tuple
(c0, c1, c2, d0, d1, d2, cp)
inputs : tuple
(raw voltage, pressure, temperature, salinity, oxygen solubility)
"""
c0, c1, c2, d0, d1, d2, cp = coefs
V_r, P, T, S, o2_sol = inputs
K_sv = c0 + (c1 * T) + (c2 * T ** 2) # Stern-Volmer constant (Tengberg et al. 2006)
V0 = (1 + d0 * T) # voltage at zero oxygen (Uchida 2010, eq. 10)
Vc = (d1 + d2 * V_r) # raw voltage (Uchida 2010, eq. 10)
o2_sat = ((V0 / Vc) - 1) / K_sv # oxygen saturation [%] (Uchida 2010, eq. 6)
DO = o2_sat * o2_sol # dissolved oxygen concentration
DO_c = DO * (1 + cp * P / 1000) ** (1 / 3) # pressure compensated DO
DO_sc = salinity_correction(DO_c, T, S) # salinity + pressure compensated DO
return DO_sc
def oxy_weighted_residual(coefs, weights, inputs, refoxy, L_norm=2):
# TODO: optionally include other residual types
# (abstracted from PMEL code oxygen_cal_ml.m)
# unweighted L2: sum((ref - oxy)^2) # if weighted fails
# unweighted L4: sum((ref - oxy)^4) # unsure of use case
# unweighted L1: sum(abs(ref - oxy)) # very far from ideal
# anything else? genericize with integer "norm" function input?
residuals = np.sum(
(weights * (refoxy - _Uchida_DO_eq(coefs, inputs)) ** 2)
) / np.sum(weights ** 2)
return residuals
def calibrate_oxy(btl_df, time_df, ssscc_list):
"""
Non-linear least squares fit oxygen optode against bottle oxygen.
Note: optode data that were obtained during bottle stops can be used for calibration
instead of density matching the downcast (see Uchida 2010, pg. 7).
Parameters
----------
btl_df : DataFrame
CTD data at bottle stops
time_df : DataFrame
Continuous CTD data
ssscc_list : list of str
List of stations to process
Returns
-------
"""
log.info("Calibrating oxygen (RINKO)")
# initialize coef df
coefs_df = pd.DataFrame(columns=["c0", "c1", "c2", "d0", "d1", "d2", "cp"])
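# A plausible fitting-loop sketch (assumed, not part of the original module);
# the bottle-table column names and the starting coefficients are placeholders.
from scipy import optimize
for ssscc in ssscc_list:
    rows = btl_df[btl_df['SSSCC'] == ssscc]  # placeholder station column name
    inputs = (
        rows['U_DEF_poly1'],  # raw optode voltage (placeholder name)
        rows['CTDPRS'],       # pressure
        rows['CTDTMP1'],      # temperature
        rows['CTDSAL'],       # salinity
        rows['OS'],           # oxygen solubility
    )
    weights = np.ones(len(rows))  # uniform weights, for the sketch only
    x0 = (1.0, 1e-3, 1e-6, 1e-3, 1.0, 1.0, 0.1)  # rough initial guess
    fit = optimize.minimize(oxy_weighted_residual, x0,
                            args=(weights, inputs, rows['REFOXY']))
    coefs_df.loc[ssscc] = fit.x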
'''
@author : <NAME>
ML model for foreign exchange prediction
'''
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import joblib
def getFxRatesForPairs(pairName):
df = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\fx_rates_aud-USD.csv")
df = df.replace('ND', np.nan)
df = df.dropna().reset_index(drop=True)
df.isna().sum()
for col in df.columns[1:]:
df[col] = pd.to_numeric(df[col], errors='coerce')
df['Time Series'] = pd.to_datetime(df['Time Series'])
df['month'] = df['Time Series'].dt.month
df['year'] = df['Time Series'].dt.year
df['month_year'] = df['Time Series'].dt.to_period('M')
return df.groupby('month_year').AUD_USD.mean().reset_index()
def getIrdData(pairName):
ir_df = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\aud-usd-ird.csv")
ir_df = ir_df[(ir_df['Date'] >= '2016-03-01') &
(ir_df['Date'] <= '2020-04-02')]
ir_df = ir_df['Long Carry'].astype(str)
ir_df = ir_df.reindex(index=ir_df.index[::-1])  # reindex returns a new Series, so keep the reversed result
ir_df = ir_df.replace({'%': ''}, regex=True)
ir_df = ir_df.astype(float)
return np.array(ir_df).reshape(-1, 1)
def getGdpDiff(pairName):
aus_gdp = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\aus-gdp-rate.csv")
usa_gdp = pd.read_csv("C:\\Users\\Srivastava_Am\\PycharmProjects\\exchange-rate-prediction\\data_source\\usd-gdp-rate.csv")
aus_gdp['DATE'] = pd.to_datetime(aus_gdp['DATE']).dt.to_period('M')
aus_gdp = aus_gdp.set_index('DATE').resample('M').interpolate()
aus_gdp['month_year'] = aus_gdp.index
usa_gdp['DATE'] = pd.to_datetime(usa_gdp['DATE']).dt.to_period('M')
usa_gdp = usa_gdp.set_index('DATE').resample('M').interpolate()
usa_gdp['month_year'] = usa_gdp.index
aus_gdp = aus_gdp.rename(columns={'GDP': 'AUS_GDP'})
aus_usa_gdp = pd.merge(aus_gdp, usa_gdp, on="month_year", how="inner")
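# A plausible continuation sketch (assumed, not part of the original snippet);
# it assumes the US file keeps a value column named 'GDP' after the merge.
aus_usa_gdp['gdp_diff'] = aus_usa_gdp['AUS_GDP'] - aus_usa_gdp['GDP']
return aus_usa_gdp[['month_year', 'gdp_diff']]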
##? not sure what this is ...
from numpy.core.numeric import True_
import pandas as pd
import numpy as np
## this function gives detailed info on NaN values of input df
from data_clean import perc_null
#these functions add a date column (x2) and correct the mp season format
from data_fix_dates import game_add_mp_date, bet_add_mp_date, fix_mp_season
#these functions assign nhl_names eg 'NYR' to bet, mp, and game;
# functions use simple dictionaries
from data_fix_team_names import bet_to_nhl, mp_to_nhl, game_to_nhl
##these are two different functions for assigning game_id to df_betting, based on team, date, H/A
##one uses df_game as look up table ... other uses df_mp_teams as look up table
from data_bet_add_game_id import mp_to_bet_add_game_id_no_VH
##Stage 1. Import all the files
##file paths
path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/"
Kaggle_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/"
mp_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/"
betting_path = "/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/"
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Betting_Data/nhl odds 2007-08.xlsx
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Kaggle_Data_Ellis/
#data_bootcamp/GitHub/final_project_nhl_prediction/Data/Money_Puck_Data/
##Kaggle files
df_game = pd.read_csv(Kaggle_path+'game.csv')
df_game_team_stats = pd.read_csv(Kaggle_path+'game_teams_stats.csv')
df_game_skater_stats = pd.read_csv(Kaggle_path+'game_skater_stats.csv')
df_game_goalie_stats = pd.read_csv(Kaggle_path+'game_goalie_stats.csv')
##more subtle Kaggle features:
df_game_scratches = pd.read_csv(Kaggle_path+'game_scratches.csv')
df_game_officials = pd.read_csv(Kaggle_path+'game_officials.csv')
df_team_info = pd.read_csv(Kaggle_path+'team_info.csv')
## grab all the moneypuck data
df_mp_teams = pd.read_csv(mp_path+'all_teams.csv')
## grab all betting data
df1 = pd.read_excel(io = betting_path+'nhl odds 2007-08.xlsx')
df2 = pd.read_excel(io = betting_path+'nhl odds 2008-09.xlsx')
df3 = pd.read_excel(io = betting_path+'nhl odds 2009-10.xlsx')
df4 = pd.read_excel(io = betting_path+'nhl odds 2010-11.xlsx')
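## a plausible next step (assumed, not part of the original snippet):
## stack the four seasons of odds into a single frame
df_betting = pd.concat([df1, df2, df3, df4], ignore_index=True)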
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import re
import sys
import pandas as pd
import numpy as np
import glob
from sklearn.feature_extraction.text import CountVectorizer
from evaluation.experiment import Experiment
def convert_argmin(x):
label = x.split('-')[0]
if label == 'I':
return 0
if label == 'O':
return 1
if label == 'B':
return 2
def convert_7class_argmin(x):
label = x.split('-')[0]
if label == 'I':
label = x.split('-')[1].split(':')[0]
if label == 'MajorClaim':
return 0
elif label == 'Claim':
return 3
elif label == 'Premise':
return 5
if label == 'O':
return 1
if label == 'B':
label = x.split('-')[1].split(':')[0]
if label == 'MajorClaim':
return 2
elif label == 'Claim':
return 4
elif label == 'Premise':
return 6
def convert_crowdsourcing(x):
if x == 'Premise-I':
return 0
elif x == 'O':
return 1
elif x== 'Premise-B':
return 2
else:
return -1
def load_argmin_data():
path = '../../data/bayesian_sequence_combination/data/argmin/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "*.dat.out"))
df_from_each_file = (pd.read_csv(f, sep='\t', usecols=(0, 5, 6), converters={5:convert_argmin, 6:convert_argmin},
header=None, quoting=3) for f in all_files)
concatenated = pd.concat(df_from_each_file, ignore_index=True, axis=1).values
annos = concatenated[:, 1::3]
for t in range(1, annos.shape[0]):
annos[t, (annos[t-1, :] == 1) & (annos[t, :] == 0)] = 2
gt = concatenated[:, 2][:, None]
doc_start = np.zeros((annos.shape[0], 1))
doc_start[np.where(concatenated[:, 0] == 1)] = 1
# correct the base classifiers
non_start_labels = [0]
start_labels = [2] # values to change invalid I tokens to
for l, label in enumerate(non_start_labels):
start_annos = annos[doc_start.astype(bool).flatten(), :]
start_annos[start_annos == label] = start_labels[l]
annos[doc_start.astype(bool).flatten(), :] = start_annos
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/gt.csv', gt, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/argmin/doc_start.csv', doc_start, fmt='%s', delimiter=',')
return gt, annos, doc_start
def load_argmin_7class_data():
path = '../../data/bayesian_sequence_combination/data/argmin/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "*.dat.out"))
df_from_each_file = (pd.read_csv(f, sep='\t', usecols=(0, 5, 6), converters={5:convert_7class_argmin,
6:convert_7class_argmin}, header=None, quoting=3) for f in all_files)
concatenated = pd.concat(df_from_each_file, ignore_index=True, axis=1).values
annos = concatenated[:, 1::3]
gt = concatenated[:, 2][:, None]
doc_start = np.zeros((annos.shape[0], 1))
doc_start[np.where(concatenated[:, 0] == 1)] = 1
# correct the base classifiers
non_start_labels = [0, 3, 5]
start_labels = [2, 4, 6] # values to change invalid I tokens to
for l, label in enumerate(non_start_labels):
start_annos = annos[doc_start.astype(bool).flatten(), :]
start_annos[start_annos == label] = start_labels[l]
annos[doc_start.astype(bool).flatten(), :] = start_annos
outpath = '../../data/bayesian_sequence_combination/data/argmin7/'
if not os.path.isdir(outpath):
os.mkdir(outpath)
np.savetxt(outpath + 'annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt(outpath + 'gt.csv', gt, fmt='%s', delimiter=',')
np.savetxt(outpath + 'doc_start.csv', doc_start, fmt='%s', delimiter=',')
return gt, annos, doc_start
def load_crowdsourcing_data():
path = '../../data/bayesian_sequence_combination/data/crowdsourcing/'
if not os.path.isdir(path):
os.mkdir(path)
all_files = glob.glob(os.path.join(path, "exported*.csv"))
print(all_files)
convs = {}
for i in range(1,50):
convs[i] = convert_crowdsourcing
df_from_each_file = [pd.read_csv(f, sep=',', header=None, skiprows=1, converters=convs) for f in all_files]
concatenated = pd.concat(df_from_each_file, ignore_index=False, axis=1).values
concatenated = np.delete(concatenated, 25, 1);
annos = concatenated[:,1:]
doc_start = np.zeros((annos.shape[0],1))
doc_start[0] = 1
for i in range(1,annos.shape[0]):
if '_00' in str(concatenated[i,0]):
doc_start[i] = 1
np.savetxt('../../data/bayesian_sequence_combination/data/crowdsourcing/gen/annos.csv', annos, fmt='%s', delimiter=',')
np.savetxt('../../data/bayesian_sequence_combination/data/crowdsourcing/gen/doc_start.csv', doc_start, fmt='%s', delimiter=',')
return annos, doc_start
def build_feature_vectors(text_data_arr):
text_data_arr = np.array(text_data_arr).astype(str)
vectorizer = CountVectorizer()
count_vectors = vectorizer.fit_transform(text_data_arr) # each element can be a sentence or a single word
count_vectors = count_vectors.toarray() # each row will be a sentence
return count_vectors, vectorizer.get_feature_names()
def _load_pico_feature_vectors_from_file(corpus):
all_text = []
for docid in corpus.docs:
text_d = corpus.get_doc_text(docid)
all_text.append(text_d)
feature_vecs, _ = build_feature_vectors(all_text)
return feature_vecs
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.expanduser("../../data/bayesian_sequence_combination/data/bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int) # O tokens
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['text'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['text'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
# print('breaking for fast debugging')
# break
return all_data, workerids
def load_biomedical_data(regen_data_files, debug_subset_size=None):
savepath = '../../data/bayesian_sequence_combination/data/bio/'
if not os.path.isdir(savepath):
os.mkdir(savepath)
if regen_data_files or not os.path.isfile(savepath + '/annos.csv'):
anno_path_root = '../../data/bayesian_sequence_combination/data/bio-PICO/annotations'
# There are four folders here:
# acl17-test: the only one containing 'professional' annotations. 191 docs
# train: 3549 docs
# dev: 500 docs
# test: 500 docs
# Total of 4740 is slightly fewer than the values stated in the paper.
# The validation/test split in the acl17-test data is also not given. This suggests we may need to run the
# HMMCrowd and LSTMCrowd methods with hyperparameter tuning on our own splits. Let's skip that tuning for now?
# Cite Nils' paper about using a generic hyperparameter tuning that works well across tasks -- we need to do
# this initially because we don't have gold data to optimise on.
# Nguyen et al do only light tuning with a few (less than 5) values to choose from for each hyperparameter of
# HMM-Crowd, LSTM-Crowd and the individual LSTMs. Not clear whether the values set in the code as default are
# the chosen values -- let's assume so for now. We can re-tune later if necessary. Remember: we don't require
# a validation set for tuning our methods.
# We need for task1 and task2:
# train, dev and test splits.
# I believe the acl17-test set was split to form the dev and test sets in nguyen et al.
# Task 1 does not require separate training samples -- it's trained on crowdsourced rather than professional labels.
# Task 2 requires testing on separate samples (with gold labels)
# from the training samples (with crowd labels).
# Both tasks use all training data for training and the acl17-test set for validation/testing.
# These other splits into the train, test and dev folders appear to relate to a different set of experiments
# and are not relevant to nguyen et al 2017.
folders_to_load = ['acl17-test', 'train', 'test', 'dev']
all_data = None
all_workerids = None
for folder in folders_to_load:
print('Loading folder %s' % folder)
folder_data, workerids = _load_bio_folder(anno_path_root, folder)
if all_data is None:
all_data = folder_data
all_workerids = workerids
else:
all_data = pd.concat([all_data, folder_data])
all_workerids = np.unique(np.append(workerids.flatten(), all_workerids.flatten()))
all_data.to_csv(savepath + '/annos.csv', columns=all_workerids, header=False, index=False)
all_data.to_csv(savepath + '/gt.csv', columns=['gold'], header=False, index=False)
all_data.to_csv(savepath + '/doc_start.csv', columns=['doc_start'], header=False, index=False)
all_data.to_csv(savepath + '/text.csv', columns=['text'], header=False, index=False)
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None, nrows=debug_subset_size)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
print('loading text data...')
text = pd.read_csv(savepath + './text.csv', skip_blank_lines=False, header=None, nrows=debug_subset_size)
text = text.fillna(' ').values
print('loading doc starts...')
doc_start = pd.read_csv(savepath + '/doc_start.csv', header=None, nrows=debug_subset_size).values #np.genfromtxt(savepath + '/doc_start.csv')
print('Loaded %i documents' % np.sum(doc_start))
print('loading ground truth labels...')
gt = pd.read_csv(savepath + '/gt.csv', header=None, nrows=debug_subset_size).values # np.genfromtxt(savepath + '/gt.csv')
# # debug subset
# # crowd_labelled[int(np.round(0.01 * len(crowd_labelled) )):] = False
# annos = pd.read_csv(savepath + './annos_debug.csv', skip_blank_lines=False, header=None)
# annos = annos.fillna(-1)
# annos = annos.values
#
# text = pd.read_csv(savepath + './text_debug.csv', skip_blank_lines=False, header=None)
# text = text.fillna(' ').values
#
# doc_start = pd.read_csv(savepath + './doc_start_debug.csv', skip_blank_lines=False, header=None)
# doc_start = doc_start.values.astype(bool)
#
# gt = pd.read_csv(savepath + './gt_debug.csv', skip_blank_lines=False, header=None)
# gt = gt.values.astype(int)
if len(text) == len(annos) - 1:
# sometimes the last line of text is blank and doesn't get loaded into text, but doc_start and gt contain labels
# for the newline token
annos = annos[:-1]
doc_start = doc_start[:-1]
gt = gt[:-1]
print('Creating dev/test split...')
# seed = 10
#
# gt_test, gt_dev, doc_start_dev, text_dev = split_dataset(
# gt, doc_start, text, annos, seed
# )
#
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
#testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
ntestdocs = int(np.floor(ndocs * 0.5))
docidxs = np.cumsum(doc_start & (gt != -1)) # gets us the doc ids
# # testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.argwhere(docidxs == (ntestdocs+1))[0][0]
#
# # devidxs = np.ones(len(gt), dtype=bool)
# # devidxs[testidxs] = False
#
# The first half of the labelled data is used as dev, second half as test
gt_test = np.copy(gt)
gt_test[ntestidxs:] = -1
gt_dev = np.copy(gt)
gt_dev[:ntestidxs] = -1
doc_start_dev = doc_start[gt_dev != -1]
text_dev = text[gt_dev != -1]
gt_task1_dev = gt_dev
gt_dev = gt_dev[gt_dev != -1]
return gt_test, annos, doc_start, text, gt_task1_dev, gt_dev, doc_start_dev, text_dev
def _map_ner_str_to_labels(arr):
arr = arr.astype(str)
arr[arr == 'O'] = 1
arr[arr == 'B-ORG'] = 2
arr[arr == 'I-ORG'] = 0
arr[arr == 'B-PER'] = 4
arr[arr == 'I-PER'] = 3
arr[arr == 'B-LOC'] = 6
arr[arr == 'I-LOC'] = 5
arr[arr == 'B-MISC'] = 8
arr[arr == 'I-MISC'] = 7
arr[arr == '?'] = -1
try:
arr_ints = arr.astype(int)
except:
print("Could not map all annotations to integers. The annotations we found were:")
uannos = []
for anno in arr:
if anno not in uannos:
uannos.append(anno)
print(uannos)
# # Don't correct the training data like this as it can introduce more errors, e.g. some errors in the data are where
# there is a mis-placed O in the middle of a tag. Correcting the subsequent I to a B is wrong...
# I_labels = [0, 3, 5, 7]
# B_labels = [2, 4, 6, 8]
# for i, I in enumerate(I_labels):
# arr_prev = np.zeros(arr_ints.shape)
# arr_prev[1:] = arr_ints[:-1]
# to_correct = (arr_ints == I) & (arr_prev != B_labels[i]) & (arr_prev != I)
#
# if np.sum(to_correct):
# print('Correction at tokens: %s' % np.argwhere(to_correct).flatten())
# arr_ints[to_correct] = B_labels[i]
# # change IOB2 to IOB
# I_labels = [0, 3, 5, 7]
# B_labels = [2, 4, 6, 8]
# for i, I in enumerate(I_labels):
# arr_prev = np.zeros(arr_ints.shape)
# arr_prev[1:] = arr_ints[:-1]
# to_correct = (arr_ints == B_labels[i]) & (arr_prev != I)
#
# if np.sum(to_correct):
# print('Correction at tokens: %s' % np.argwhere(to_correct).flatten())
# arr_ints[to_correct] = I
return arr_ints
def _load_rodrigues_annotations(dir, worker_str, gold_char_idxs=None, gold_tokens=None, skip_imperfect_matches=False):
worker_data = None
for f in os.listdir(dir):
if not f.endswith('.txt'):
continue
doc_str = f.split('.')[0]
f = os.path.join(dir, f)
#print('Processing %s' % f)
new_data = pd.read_csv(f, names=['text', worker_str], skip_blank_lines=False,
dtype={'text':str, worker_str:str}, na_filter=False, delim_whitespace=True)
doc_gaps = (new_data['text'] == '') & (new_data[worker_str] == '')
doc_start = np.zeros(doc_gaps.shape[0], dtype=int)
doc_start[doc_gaps[:-1][doc_gaps[:-1]].index + 1] = 1 # the indexes after the gaps
doc_content = new_data['text'] != ''
new_data['doc_start'] = doc_start
new_data = new_data[doc_content]
new_data['doc_start'].iat[0] = 1
annos_to_keep = np.ones(new_data.shape[0], dtype=bool)
for t, tok in enumerate(new_data['text']):
if len(tok.split('/')) > 1:
tok = tok.split('/')[0]
new_data['text'].iat[t] = tok
if len(tok) == 0:
annos_to_keep[t] = False
# compare the tokens in the worker annotations to the gold labels. They are misaligned in the dataset. We will
# skip labels in the worker annotations that are assigned to only a part of a token in the gold dataset.
char_counter = 0
gold_tok_idx = 0
skip_sentence = False
sentence_start = 0
if gold_char_idxs is not None:
gold_chars = np.array(gold_char_idxs[doc_str])
last_accepted_tok = ''
last_accepted_idx = -1
for t, tok in enumerate(new_data['text']):
if skip_imperfect_matches and skip_sentence:
new_data[worker_str].iloc[t] = -1
if new_data['doc_start'].iat[t]:
skip_sentence = False
if new_data['doc_start'].iat[t]:
sentence_start = t
gold_char_idx = gold_chars[gold_tok_idx]
gold_tok = gold_tokens[doc_str][gold_tok_idx]
#print('tok = %s, gold_tok = %s' % (tok, gold_tok))
if not annos_to_keep[t]:
continue # already marked as skippable
if char_counter < gold_char_idx and \
(last_accepted_tok + tok) in gold_tokens[doc_str][gold_tok_idx-1]:
print('Correcting misaligned annotations (split word in worker data): %i, %s' % (t, tok))
skip_sentence = True
last_accepted_tok += tok
annos_to_keep[last_accepted_idx] = False # skip the previous ones until the end
# where we remove a line, assume that the last annotation in the removed line really belongs to the
# line before...
# new_data[worker_str].iat[t - 1] = new_data[worker_str].iat[t]
# assume that the first annotation was actually correct -- I don't think we want this because the
# first token was sometimes erroneously applied to only a part of the string.
#new_data[worker_str].iat[t] = new_data[worker_str].iat[last_accepted_idx]
new_data['text'].iat[t] = last_accepted_tok
new_data['doc_start'].iat[t] = new_data['doc_start'].iat[last_accepted_idx]
last_accepted_idx = t
char_counter += len(tok)
elif tok not in gold_tok or (tok == '' and gold_tok != ''):
print('Correcting misaligned annotations (spurious text in worker data): %i, %s vs. %s' % (t, tok, gold_tok))
skip_sentence = True
annos_to_keep[t] = False # skip the previous ones until the end
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
elif tok == gold_tok[:len(tok)]: # needs to match the first characters in the string, not just be there somewhere
gold_tok_idx += 1
if tok != gold_tok:
skip_sentence = True
while char_counter > gold_char_idx:
print('error in text alignment between worker and gold!')
len_to_skip = gold_chars[gold_tok_idx - 1] - gold_chars[gold_tok_idx - 2]
# move the gold counter along to the next token because gold is behind
gold_tok_idx += 1
gold_chars[gold_tok_idx:] -= len_to_skip
gold_char_idx = gold_chars[gold_tok_idx]
gold_char_idxs[doc_str] = gold_chars
last_accepted_tok = tok
last_accepted_idx = t
char_counter += len(tok)
else:
skip_sentence = True
annos_to_keep[t] = False
if new_data['doc_start'].iat[t]: # now we are skipping this token but we don't want to lose the doc_start record.
new_data['doc_start'].iat[t+1] = 1
# no more text in this document, but the last sentence must be skipped
if skip_imperfect_matches and skip_sentence:
# annos_to_keep[sentence_start:t+1] = False
new_data[worker_str].iloc[sentence_start:t+1] = -1
new_data = new_data[annos_to_keep]
new_data[worker_str] = _map_ner_str_to_labels(new_data[worker_str])
new_data['doc_id'] = doc_str
new_data['tok_idx'] = np.arange(new_data.shape[0])
# add to data from this worker
if worker_data is None:
worker_data = new_data
else:
worker_data = pd.concat([worker_data, new_data])
return worker_data
def _load_rodrigues_annotations_all_workers(annotation_data_path, gold_data, skip_dirty=False):
worker_dirs = os.listdir(annotation_data_path)
data = None
annotator_cols = np.array([], dtype=str)
char_idx_word_starts = {}
chars = {}
char_counter = 0
for t, tok in enumerate(gold_data['text']):
if gold_data['doc_id'].iloc[t] not in char_idx_word_starts:
char_counter = 0
starts = []
toks = []
char_idx_word_starts[gold_data['doc_id'].iloc[t]] = starts
chars[gold_data['doc_id'].iloc[t]] = toks
starts.append(char_counter)
toks.append(tok)
char_counter += len(tok)
for widx, dir in enumerate(worker_dirs):
if dir.startswith("."):
continue
worker_str = dir
annotator_cols = np.append(annotator_cols, worker_str)
dir = os.path.join(annotation_data_path, dir)
print('Processing dir for worker %s (%i of %i)' % (worker_str, widx, len(worker_dirs)))
worker_data = _load_rodrigues_annotations(dir, worker_str,
char_idx_word_starts, chars, skip_dirty)
print("Loaded a dataset of size %s" % str(worker_data.shape))
# now need to join this to other workers' data
if data is None:
data = worker_data
else:
data = data.merge(worker_data, on=['doc_id', 'tok_idx', 'text', 'doc_start'], how='outer', sort=True, validate='1:1')
return data, annotator_cols
def IOB_to_IOB2(seq):
# test with and without this to see if we can reproduce the MV values from Nguyen et al with NER data.
# It seems to make little difference.
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in I_labels:
typeidx = np.argwhere(I_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != label):
# we have I preceded by O. This needs to be changed to a B.
seq[i] = B_labels[typeidx]
return seq
def IOB2_to_IOB(seq):
I_labels = [0, 3, 5, 7]
B_labels = [2, 4, 6, 8]
for i, label in enumerate(seq):
if label in B_labels:
typeidx = np.argwhere(B_labels == label)[0][0]
if i == 0 or (seq[i-1] != B_labels[typeidx] and seq[i-1] != I_labels[typeidx]):
    # this B does not continue a chunk of the same type, so plain IOB writes it as I
    seq[i] = I_labels[typeidx]
return seq
def load_ner_data(regen_data_files, skip_sen_with_dirty_data=False):
# In Nguyen et al 2017, the original data has been separated out for task 1, aggregation of crowd labels. In this
# task, the original training data is further split into val and test -- to make our results comparable with Nguyen
# et al, we need to test on the test split for task 1, but train our model on both.
# To make them comparable with Rodrigues et al. 2014, we need to test on all data (check this in their paper).
# Task 2 is for prediction on a test set given a model trained on the training set and optimised on the validation
# set. It would be ideal to show both these results...
savepath = '../../data/bayesian_sequence_combination/data/ner/' # location to save our csv files to
if not os.path.isdir(savepath):
os.mkdir(savepath)
# within each of these folders below is an mturk_train_data folder, containing crowd labels, and a ground_truth
# folder. Rodrigues et al. have assigned document IDs that allow us to match up the annotations from each worker.
# Nguyen et al. have split the training set into the val/test folders for task 1. Data is otherwise the same as in
# the Rodrigues folder under mturk/extracted_data.
task1_val_path = '../../data/bayesian_sequence_combination/data/crf-ma-NER-task1/val/'
task1_test_path = '../../data/bayesian_sequence_combination/data/crf-ma-NER-task1/test/'
# These are just two files that we use for text features + ground truth labels.
task2_val_path = '../../data/bayesian_sequence_combination/data/English NER/eng.testa'
task2_test_path = '../../data/bayesian_sequence_combination/data/English NER/eng.testb'
if regen_data_files or not os.path.isfile(savepath + '/task1_val_annos.csv'):
# Steps to load data (all steps need to map annotations to consecutive integer labels).
# 1. Create an annos.csv file containing all the annotations in task1_val_path and task1_test_path.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(task1_val_path + 'ground_truth/', 'gold')
# load the validation data
data, annotator_cols = _load_rodrigues_annotations_all_workers(task1_val_path + 'mturk_train_data/',
gold_data, skip_sen_with_dirty_data)
# 2. Create ground truth CSV for task1_val_path (for tuning the LSTM)
# merge gold with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'text'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annotations per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annotations
if len(np.unique(counts)) > 1:
print('Validation data: we have some misaligned labels.')
print(counts)
            if np.any(counts == 0):
print('Removing document %s with no annotations.' % doc)
# remove any lines with no annotations
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_val_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the text in same order
data.to_csv(savepath + '/task1_val_text.csv', columns=['text'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_val_gt.csv', columns=['gold'], header=False, index=False)
# 3. Load worker annotations for test set.
# load the gold data in the same way as the worker data
gold_data = _load_rodrigues_annotations(task1_test_path + 'ground_truth/', 'gold')
# load the test data
data, annotator_cols = _load_rodrigues_annotations_all_workers(task1_test_path + 'mturk_train_data/',
gold_data, skip_sen_with_dirty_data)
# 4. Create ground truth CSV for task1_test_path
# merge with the worker data
data = data.merge(gold_data, how='outer', on=['doc_id', 'tok_idx', 'doc_start', 'text'], sort=True)
num_annotations = np.zeros(data.shape[0]) # count annotations per token
for col in annotator_cols:
num_annotations += np.invert(data[col].isna())
for doc in np.unique(data['doc_id']):
# get tokens from this doc
drows = data['doc_id'] == doc
# get the annotation counts for this doc
counts = num_annotations[drows]
# check that all tokens have same number of annotations
if len(np.unique(counts)) > 1:
print('Test data: we have some misaligned labels.')
print(counts)
            if np.any(counts == 0):
print('Removing document %s with no annotations.' % doc)
# remove any lines with no annotations
annotated_idxs = num_annotations >= 1
data = data[annotated_idxs]
# save the annos.csv
data.to_csv(savepath + '/task1_test_annos.csv', columns=annotator_cols, index=False,
float_format='%.f', na_rep=-1)
# save the text in same order
data.to_csv(savepath + '/task1_test_text.csv', columns=['text'], header=False, index=False)
# save the doc starts
data.to_csv(savepath + '/task1_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# save the annos.csv
data.to_csv(savepath + '/task1_test_gt.csv', columns=['gold'], header=False, index=False)
# 5. Create a file containing only the words for the task 2 validation set, i.e. like annos.csv with no annotations.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_val_path but blank out the task_1 labels
# (for tuning the LSTM for task 2)
import csv
eng_val = pd.read_csv(task2_val_path, delimiter=' ', usecols=[0,3], names=['text', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_val.shape[0])
docstart_token = eng_val['text'][0]
doc_starts[1:] = (eng_val['text'] == docstart_token)[:-1]
eng_val['doc_start'] = doc_starts
eng_val['tok_idx'] = eng_val.index
eng_val = eng_val[eng_val['text'] != docstart_token] # remove all the docstart labels
eng_val['gold'] = _map_ner_str_to_labels(eng_val['gold'])
eng_val['gold'] = IOB_to_IOB2(eng_val['gold'].values)
eng_val.to_csv(savepath + '/task2_val_gt.csv', columns=['gold'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_text.csv', columns=['text'], header=False, index=False)
eng_val.to_csv(savepath + '/task2_val_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 6. Create a file containing only the words for the task 2 test set, i.e. like annos.csv with no annotations.
# Create ground truth CSV for task1_val_path, task1_test_path and task2_test_path but blank out the task_1 labels/
eng_test = pd.read_csv(task2_test_path, delimiter=' ', usecols=[0,3], names=['text', 'gold'],
skip_blank_lines=True, quoting=csv.QUOTE_NONE)
doc_starts = np.zeros(eng_test.shape[0])
docstart_token = eng_test['text'][0]
doc_starts[1:] = (eng_test['text'] == docstart_token)[:-1]
eng_test['doc_start'] = doc_starts
eng_test['tok_idx'] = eng_test.index
eng_test = eng_test[eng_test['text'] != docstart_token] # remove all the docstart labels
eng_test['gold'] = _map_ner_str_to_labels(eng_test['gold'])
eng_test['gold'] = IOB_to_IOB2(eng_test['gold'].values)
eng_test.to_csv(savepath + '/task2_test_gt.csv', columns=['gold'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_text.csv', columns=['text'], header=False, index=False)
eng_test.to_csv(savepath + '/task2_test_doc_start.csv', columns=['doc_start'], header=False, index=False)
# 7. Reload the data for the current run...
print('loading annos for task1 test...')
annos = pd.read_csv(savepath + '/task1_test_annos.csv', skip_blank_lines=False)
print('loading text data for task1 test...')
text = pd.read_csv(savepath + '/task1_test_text.csv', skip_blank_lines=False, header=None)
print('loading doc_starts for task1 test...')
doc_start = pd.read_csv(savepath + '/task1_test_doc_start.csv', skip_blank_lines=False, header=None)
print('loading ground truth for task1 test...')
gt_t = pd.read_csv(savepath + '/task1_test_gt.csv', skip_blank_lines=False, header=None)
print('Unique labels: ')
print(np.unique(gt_t))
print(gt_t.shape)
print('loading annos for task1 val...')
annos_v = pd.read_csv(savepath + '/task1_val_annos.csv', skip_blank_lines=False)
# remove any lines with no annotations
# annotated_idxs = np.argwhere(np.any(annos_v != -1, axis=1)).flatten()
# annos_v = annos_v.iloc[annotated_idxs, :]
annos = pd.concat((annos, annos_v), axis=0)
annos = annos.fillna(-1)
annos = annos.values
print('loaded annotations for %i tokens' % annos.shape[0])
print('loading text data for task1 val...')
text_v = pd.read_csv(savepath + '/task1_val_text.csv', skip_blank_lines=False, header=None)
# text_v = text_v.iloc[annotated_idxs]
text = pd.concat((text, text_v), axis=0)
text = text.fillna(' ').values
print('loading doc_starts for task1 val...')
doc_start_v = pd.read_csv(savepath + '/task1_val_doc_start.csv', skip_blank_lines=False, header=None)
# doc_start_v = doc_start_v.iloc[annotated_idxs]
doc_start = pd.concat((doc_start, doc_start_v), axis=0).values
print('Loading a gold valdation set for task 1')
gt_blanks = pd.DataFrame(np.zeros(gt_t.shape[0]) - 1)
gt_val_task1_orig = pd.read_csv(savepath + '/task1_val_gt.csv', skip_blank_lines=False, header=None)
gt_val_task1 = pd.concat((gt_blanks, gt_val_task1_orig), axis=0).values
print('not concatenating ground truth for task1 val')
gt_v = pd.DataFrame(np.zeros(annos_v.shape[0]) - 1) # gt_val_task1_orig#
#gt = pd.DataFrame(np.zeros(gt.shape[0]) - 1) # gt_val_task1_orig#
gt_v_real = pd.read_csv(savepath + '/task1_val_gt.csv', skip_blank_lines=False, header=None)
#gt_v = gt_v.iloc[annotated_idxs]
gt = pd.concat((gt_t, gt_v), axis=0).values
gt_all = pd.concat((gt_t, gt_v_real), axis=0).values
print('loaded ground truth for %i tokens' % gt.shape[0])
print('loading text data for task 2 test')
text_task2 = pd.read_csv(savepath + '/task2_test_text.csv', skip_blank_lines=False, header=None)
text_task2 = text_task2.fillna(' ').values
print('loading doc_starts for task 2 test')
doc_start_task2 = pd.read_csv(savepath + '/task2_test_doc_start.csv', skip_blank_lines=False, header=None).values
print('loading ground truth for task 2 test')
gt_task2 = pd.read_csv(savepath + '/task2_test_gt.csv', skip_blank_lines=False, header=None).values
# validation sets for methods that predict on features only
print('loading text data for task 2 val')
text_val_task2 = pd.read_csv(savepath + '/task2_val_text.csv', skip_blank_lines=False, header=None)
text_val_task2 = text_val_task2.fillna(' ').values
print('loading doc_starts for task 2 val')
doc_start_val_task2 = pd.read_csv(savepath + '/task2_val_doc_start.csv', skip_blank_lines=False, header=None).values
print('loading ground truth for task 2 val')
gt_val_task2 = pd.read_csv(savepath + '/task2_val_gt.csv', skip_blank_lines=False, header=None).values
return gt, annos, doc_start, text, gt_task2, doc_start_task2, text_task2, \
gt_val_task1, gt_val_task2, doc_start_val_task2, text_val_task2, gt_all
def split_dataset(gt, doc_start, text, annos, seed):
print('Creating dev/test split...')
np.random.seed(seed)
# since there is no separate validation set, we split the test set
ndocs = np.sum(doc_start & (gt != -1))
testdocs = np.random.randint(0, ndocs, int(np.floor(ndocs * 0.5)))
docidxs = np.cumsum(doc_start & (gt != -1)) - 1 # gets us the doc ids
testidxs = np.in1d(docidxs, testdocs)
ntestidxs = np.sum(testidxs)
devidxs = np.ones(len(gt), dtype=bool)
devidxs[testidxs] = False
gt_test = np.copy(gt)
gt_test[devidxs] = -1
#gt_dev = np.copy(gt)
#gt_dev[testidxs] = -1
gt_dev = gt[devidxs]
doc_start_dev = doc_start[devidxs]
text_dev = text[devidxs]
return gt_test, gt_dev, doc_start_dev, text_dev
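# Minimal usage sketch (the seed value is arbitrary): split the gold-labelled documents roughly 50/50,
# returning the gold labels with the dev documents masked to -1 (for testing) plus the dev-split gold
# labels, doc-start markers and text.
# gt_test, gt_dev, doc_start_dev, text_dev = split_dataset(gt, doc_start, text, annos, seed=42)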
if __name__ == '__main__':
output_dir = '../../data/bayesian_sequence_combination/output/bio_task1_mini/'
savepath = '../../data/bayesian_sequence_combination/data/bio/'
print('loading annos...')
annos = pd.read_csv(savepath + '/annos.csv', header=None)
annos = annos.fillna(-1)
annos = annos.values
#np.genfromtxt(savepath + '/annos.csv', delimiter=',')
#for a in range(annos.shape[1]):
# annos[:, a] = IOB2_to_IOB(annos[:, a])
print('loading text data...')
    text = pd.read_csv(savepath + './text.csv', skip_blank_lines=False, header=None)
import requests
import pandas as pd
import world_bank_data as wb
import lxml
def wb_corr(data, col, indicator, change=False):
    """
Returns the relationship that an input variable has with a chosen variable or chosen variables from the World Bank data, sorted by the strength of relationship
Relationship can be either the correlation between the input variable and the chosen indicator(s) or the correlation in the annual percent changes
Parameters
----------
data: A pandas dataframe that contains a column of countries called "Country," a column of years called "Year," and a column of data for a variable
col: The integer index of the column in which the data of your variable exists in your dataframe
indicator: The indicator or list of indicators to check the relationship with the input variable. Can be a character string of the indicator ID or a list
of character strings. Indicator IDs can be found through use of the World Bank APIs
change: A Boolean value. When set to True, the correlation between the annual percent change of the input variable and the annual percent change of
chosen indicator(s) will be found and used to order the strength of relationships
Returns
----------
Pandas DataFrame
A Pandas DataFrame containing the indicator names as the index and the correlation between the indicator and the input variable. If change set to True,
another column including the correlation between the annual percent changes of the variables will be included. The DataFrame is ordered on the
correlation if change is set to False and on the correlation of percent changes if change is set to True.
The number of rows in the DataFrame will correspond to the number of indicators that were requested. The number of columns will be 1 if change is
set to False and 2 if change is True.
Examples
----------
>>> import ____
>>> wb_corr(my_df, 2, '3.0.Gini') #where my_df has columns Country, Year, Data
|Indicator | Correlation | n
--------------------------------------
|Gini Coefficient| -0.955466 | 172
>>> wb_corr(wb.get_series('SP.POP.TOTL',mrv=50).reset_index,3,['3.0.Gini','1.0.HCount.1.90usd'],True) # To compare one WB indicator with others
| Indicator | Correlation | n | Correlation_change | n_change
----------------------------------------------------------------------------------------
| Poverty Headcount ($1.90 a day)| -0.001202 |172 | 0.065375 | 134
| Gini Coefficient | 0.252892 |172 | 0.000300 | 134
"""
assert type(indicator)==str or type(indicator)==list, "indicator must be either a string or a list of strings"
assert type(col)==int, "col must be the integer index of the column containing data on the variable of interest"
assert 'Country' in data.columns, "data must have a column containing countries called 'Country'"
assert 'Year' in data.columns, "Data must have a column containing years called 'Year'"
assert col<data.shape[1], "col must be a column index belonging to data"
assert type(change)==bool, "change must be a Boolean value (True or False)"
cors=[]
indicators=[]
n=[]
if type(indicator)==str:
assert indicator in list(pd.read_xml(requests.get('http://api.worldbank.org/v2/indicator?per_page=21000').content)['id']), "indicator must be the id of an indicator in the World Bank Data. Indicators can be found using the World Bank APIs. http://api.worldbank.org/v2/indicator?per_page=21000 to see all indicators or http://api.worldbank.org/v2/topic/_/indicator? to see indicators under a chosen topic (replace _ with integer 1-21)"
thing=pd.DataFrame(wb.get_series(indicator,mrv=50)) # Create a Pandas DataFrame with the data on the chosen indicator using the world_bank_data package
merged=pd.merge(data,thing,how='inner',on=['Country','Year'])
cors.append(merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)]))
indicators.append(pd.DataFrame(wb.get_series(indicator,mrv=1)).reset_index()['Series'][0])
n.append(len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()]))
if change==False:
return pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
if change==True:
        mumbo = pd.DataFrame()
# To add a new cell, type '#%%'
# To add a new markdown cell, type '#%% [markdown]'
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
try:
os.chdir(os.path.join(os.getcwd(), 'assessment'))
print(os.getcwd())
except:
pass
#%% [markdown]
# ### *IPCC SR15 scenario assessment*
#
# <img style="float: right; height: 120px; margin-top: 10px;" src="../_static/IIASA_logo.png">
# <img style="float: right; height: 100px;" src="../_static/IAMC_logo.jpg">
#
# # Scenario categorization and indicators
#
# This notebook assigns the categorization by warming outcome and computes a range of descriptive indicators
# for the scenario assessment of the IPCC's _"Special Report on Global Warming of 1.5°C"_.
# It generates a `sr15_metadata_indicators.xlsx` spreadsheet, which is used in other notebooks for this assessment
# for categorization and extracting descriptive indicators.
#
# ## Scenario ensemble data
#
# The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).
#
# Bibliographic details of the scenario ensemble and all studies that contributed scenarios to the ensemble
# are included in this repository
# as [Endnote (enl)](../bibliography/iamc_1.5c_scenario_data.enl),
# [Reference Manager (ris)](../bibliography/iamc_1.5c_scenario_data.ris),
# and [BibTex (bib)](../bibliography/iamc_1.5c_scenario_data.bib) format.
#
# ## License and recommended citation
#
# This notebook is licensed under the Apache License, Version 2.0.
#
# Please refer to the [README](../README.md) for the recommended citation of the scenario ensemble and the notebooks in this repository.
#
# ***
#%% [markdown]
# ## Import dependencies and define general notebook settings
#%%
import math
import io
import yaml
import re
import pandas as pd
import numpy as np
from IPython.display import display
#%% [markdown]
# ### Introduction and tutorial for the `pyam` package
#
# This notebook (and all other analysis notebooks in this repository) uses the `pyam` package,
# an open-source Python package for IAM scenario analysis and visualization
# ([https://software.ene.iiasa.ac.at/pyam/](http://software.ene.iiasa.ac.at/pyam/)).
#
# For an introduction of the notation and features of the `pyam` package,
# please refer to [this tutorial](https://github.com/IAMconsortium/pyam/blob/master/doc/source/tutorials/pyam_first_steps.ipynb).
# It will take you through the basic functions and options used here,
# and provide further introduction and guidelines.
#%%
import pyam
logger = pyam.logger()
#%% [markdown]
# ### Import Matplotlib and set figure layout defaults in line with SR1.5 guidelines
#%%
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
#%% [markdown]
# ## Import scenario snapshot and define auxiliary dictionaries
#
# This notebook only assigns indicator based on global timeseries data.
#
# The dictionary `meta_tables` is used to collect definitions
# of categories and secondary scenario classification throughout this script.
# These definitions are exported to the metadata/categorization Excel workbook
# at the end of the script for completeness.
# The dictionary `meta_docs` collects definitions used for the documentation tags
# in the online scenario explorer.
#
# The dictionary `specs` collects lists and the run control specifications to be exported to JSON
# and used by other notebooks for the SR1.5 scenario analysis.
#
# The `plotting_args` dictionary assigns the default plotting arguments in this notebook.
#%%
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
#%%
meta_tables = {}
meta_docs = {}
#%%
specs = {}
#%%
plotting_args = {'color': 'category', 'linewidth': 0.2}
specs['plotting_args'] = plotting_args
#%% [markdown]
# ## Verify completeness of scenario submissions for key variables
#
# Verify that every scenario except for *Shell Sky* and the historical reference scenarios reports CO2 Emissions in 2030.
#%%
sr1p5.require_variable(variable='Emissions|CO2', year=2030, exclude_on_fail=False)
#%% [markdown]
# ## Check MAGICC postprocessing prior to categorization
#
# Assign scenarios that could not be postprocessed by probabilistic MAGICC to respective categories:
# - data not available for full century
# - insufficient reporting of emission species
# - reference scenario
#%%
sr1p5.set_meta(name='category', meta= 'uncategorized')
#%%
reference = sr1p5.filter(model='Reference')
pd.DataFrame(index=reference.meta.index)
#%%
sr1p5.set_meta(meta='reference', name='category', index=reference)
#%%
no_climate_assessment = (
sr1p5.filter(category='uncategorized').meta.index
.difference(sr1p5.filter(year=2100, variable='Emissions|CO2').meta.index)
)
pd.DataFrame(index=no_climate_assessment)
#%%
sr1p5.set_meta(meta='no-climate-assessment', name='category', index=no_climate_assessment)
#%% [markdown]
# ## Categorization of scenarios
#
# This section applies the categorization of scenario as defined in Chapter 2 of the Special Report
# for unique assignment of scenarios.
#
# The category specification as agreed upon at LAM 3 in Malmö is repeated here for easier reference.
#
# The term $P_{x°C}$ refers to the probability of exceeding warming of $x°C$ throughout the century in at least one year
# and $P_{x°C}(y)$ refers to the probability of exceedance in a specific year $y$.
#
# |**Categories** |**Subcategories**|**Probability to exceed warming threshold**|**Acronym** |**Color** |
# |---------------|-----------------|-------------------------------------------|-----------------|----------------|
# | Below 1.5°C | Below 1.5°C (I) | $P_{1.5°C} \leq 0.34$ | Below 1.5C (I) | xkcd:baby blue |
# | | Below 1.5°C (II)| $0.34 < P_{1.5°C} \leq 0.50$ | Below 1.5C (II) | |
# | 1.5°C return with low OS | Lower 1.5°C return with low OS | $0.50 < P_{1.5°C} \leq 0.67$ and $P_{1.5°C}(2100) \leq 0.34$ |(Lower) 1.5C low OS | xkcd:bluish |
# | | Higher 1.5°C return with low OS | $0.50 < P_{1.5°C} \leq 0.67$ and $0.34 < P_{1.5°C}(2100) \leq 0.50$ |(Higher) 1.5C low OS | |
# | 1.5°C return with high OS | Lower 1.5°C return with high OS | $0.67 < P_{1.5°C}$ and $P_{1.5°C}(2100) \leq 0.34$ | (Lower) 1.5C high OS | xkcd:darkish blue |
# | | Higher 1.5°C return with high OS | $0.67 < P_{1.5°C}$ and $0.34 < P_{1.5°C}(2100) \leq 0.50$ | (Higher) 1.5C high OS | |
# | Lower 2.0°C | | $P_{2.0°C} \leq 0.34$ (excluding above) | Lower 2C | xkcd:orange |
# | Higher 2.0°C | | $0.34 < P_{2.0°C} \leq 0.50$ (excluding above) | Higher 2C | xkcd:red |
# | Above 2.0°C | | $P_{2.0°C} > 0.50$ for at least 1 year | Above 2C | darkgrey |
#%% [markdown]
# ### Category definitions to Excel
#
# The following dictionary repeats the category definitions from the table above
# and saves them as a `pandas.DataFrame` to a dictionary `meta_tables`.
# Throughout the notebook, this dictionary is used to collect definitions
# of categories and secondary scenario classification.
# These definitions are exported to the metadata/categorization Excel workbook
# at the end of the script for easy reference.
#%%
dct = {'Categories of scenarios':
['Below 1.5°C',
'',
'1.5°C return with low overshoot',
'',
'1.5°C return with high overshoot',
'',
'Lower 2.0°C',
'Higher 2.0°C',
'Above 2.0°C'],
'Subcategories':
['Below 1.5°C (I)',
'Below 1.5°C (II)',
'Lower 1.5°C return with low overshoot',
'Higher 1.5°C return with low overshoot',
'Lower 1.5°C return with high overshoot',
'Higher 1.5°C return with high overshoot',
'',
'',
''],
'Criteria for assignment to category':
['P1.5°C ≤ 0.34',
'0.34 < P1.5°C ≤ 0.50',
'0.50 < P1.5°C ≤ 0.67 and P1.5°C(2100) ≤ 0.34',
'0.50 < P1.5°C ≤ 0.67 and 0.34 < P1.5°C(2100) ≤ 0.50',
'0.67 < P1.5°C and P1.5°C(2100) ≤ 0.34',
'0.67 < P1.5°C and 0.34 < P1.5°C(2100) ≤ 0.50',
'P2.0°C ≤ 0.34 (excluding above)',
'0.34 < P2.0°C ≤ 0.50 (excluding above)',
'P2.0°C > 0.50 during at least 1 year'
],
'Acronym':
['Below 1.5C (I)',
'Below 1.5C (II)',
'Lower 1.5C low overshoot',
'Higher 1.5C low overshoot',
'Lower 1.5C high overshoot',
'Higher 1.5C high overshoot',
'Lower 2C',
'Higher 2C',
'Above 2C'],
'Color':
['xkcd:baby blue',
'',
'xkcd:bluish',
'',
'xkcd:darkish blue',
'',
'xkcd:orange',
'xkcd:red',
'darkgrey']
}
cols = ['Categories of scenarios', 'Subcategories', 'Criteria for assignment to category', 'Acronym', 'Color']
categories_doc = pd.DataFrame(dct)[cols]
meta_tables['categories'] = categories_doc
meta_docs['category'] = 'Categorization of scenarios by global warming impact'
meta_docs['subcategory'] = 'Sub-categorization of scenarios by global warming impact'
#%%
other_cats = ['no-climate-assessment', 'reference']
cats = ['Below 1.5C', '1.5C low overshoot', '1.5C high overshoot', 'Lower 2C', 'Higher 2C', 'Above 2C']
all_cats = cats + other_cats
subcats = dct['Acronym']
all_subcats = subcats + other_cats
#%%
specs['cats'] = cats
specs['all_cats'] = all_cats
specs['subcats'] = subcats
specs['all_subcats'] = all_subcats
#%% [markdown]
# ### Subcategory assignment
#
# We first assign the subcategories, then aggregate those assignments to the main categories.
# The categories assigned above to indicate reasons for non-processing by MAGICC are copied over to the subcategories.
#
# Keep in mind that setting a category will re-assign scenarios (in case they have already been assigned).
# So in case of going back and forth in this notebook (i.e., not executing the cells in the correct order),
# make sure to reset the categorization.
#%%
def warming_exccedance_prob(x):
return 'AR5 climate diagnostics|Temperature|Exceedance Probability|{} °C|MAGICC6'.format(x)
expected_warming = 'AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|Expected value'
median_warming = 'AR5 climate diagnostics|Temperature|Global Mean|MAGICC6|MED'
#%%
sr1p5.set_meta(meta=sr1p5['category'], name='subcategory')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Below 1.5C (I)', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34}},
color='xkcd:baby blue')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Below 1.5C (II)', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50}},
color='xkcd:baby blue')
#%% [markdown]
# To categorize by a variable using multiple filters (here: less than 66% probability of exceeding 1.5°C at any point during the century and less than 34% probability of exceeding that threshold in 2100) requires performing the assignment in three steps - first, categorize to an intermediate `low OS` category and, in a second step, assign to the category in question. The third step resets all scenarios still categorized as intermediate after the second step back to `uncategorized`.
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.67}})
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='low overshoot',
value='Lower 1.5C low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34, 'year': 2100}},
color='xkcd:bluish')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='low overshoot',
value='Higher 1.5C low overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50, 'year': 2100}},
color='xkcd:bluish')
#%% [markdown]
# Display scenarios that satisfy the `low overshoot` criterion
# but are not assigned to `Lower 1.5C low overshoot` or `Higher 1.5C low overshoot`.
# Then, reset them to uncategorized.
#%%
sr1p5.filter(subcategory='low overshoot').meta
#%%
sr1p5.set_meta(meta='uncategorized', name='subcategory', index=sr1p5.filter(subcategory='low overshoot'))
#%% [markdown]
# Determine all scenarios with a probability to exceed 1.5°C greater than 66% in any year throughout the century.
# The function `categorize()` cannot be used for this selection, because it would either check for the criteria being true for all years or for a particular year.
#%%
df = sr1p5.filter(exclude=False, subcategory='uncategorized', variable=warming_exccedance_prob(1.5)).timeseries()
sr1p5.set_meta(meta='high overshoot', name='subcategory',
index=df[df.apply(lambda x: max(x), axis=1) > 0.66].index)
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='high overshoot',
value='Lower 1.5C high overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.34, 'year': 2100}},
color='xkcd:darkish blue')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='high overshoot',
value='Higher 1.5C high overshoot', name='subcategory',
criteria={warming_exccedance_prob(1.5): {'up': 0.50, 'year': 2100}},
color='xkcd:darkish blue')
#%% [markdown]
# Reset scenarios that satisfy the `high overshoot` criterion
# but are not assigned to `Lower 1.5C high overshoot` or `Higher 1.5C high overshoot`.
#%%
sr1p5.set_meta(meta='uncategorized', name='subcategory', index=sr1p5.filter(subcategory='high overshoot'))
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Lower 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 0.34}},
color='xkcd:orange')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Higher 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 0.50}},
color='xkcd:red')
#%%
pyam.categorize(sr1p5, exclude=False, subcategory='uncategorized',
value='Above 2C', name='subcategory',
criteria={warming_exccedance_prob(2.0): {'up': 1.0}},
color='darkgrey')
#%% [markdown]
# ### Aggregation of subcategories to categories
#%%
rc = pyam.run_control()
def assign_rc_color_from_sub(cat, sub):
rc.update({'color': {'category': {cat: rc['color']['subcategory'][sub]}}})
#%%
sr1p5.set_meta(meta='Below 1.5C', name='category',
index=sr1p5.filter(subcategory=['Below 1.5C (I)', 'Below 1.5C (II)']).meta.index)
assign_rc_color_from_sub('Below 1.5C', 'Below 1.5C (II)')
#%%
sr1p5.set_meta(meta='1.5C low overshoot', name='category',
index=sr1p5.filter(subcategory=['Lower 1.5C low overshoot', 'Higher 1.5C low overshoot']))
assign_rc_color_from_sub('1.5C low overshoot', 'Lower 1.5C low overshoot')
#%%
sr1p5.set_meta(meta='1.5C high overshoot', name='category',
index=sr1p5.filter(subcategory=['Lower 1.5C high overshoot', 'Higher 1.5C high overshoot']))
assign_rc_color_from_sub('1.5C high overshoot', 'Lower 1.5C high overshoot')
#%%
cats_non15 = ['Lower 2C', 'Higher 2C', 'Above 2C']
df_2c = sr1p5.filter(subcategory=cats_non15)
sr1p5.set_meta(meta=df_2c['subcategory'], name='category')
for c in cats_non15:
assign_rc_color_from_sub(c, c)
#%% [markdown]
# ### Additional assessment of categorization
#
# Check whether there are any scenarios that return to 1.5°C by the end of the century and exceed the 2°C threshold with a likelihood higher than 34% or 50% (i.e., the `Lower 2C` or the `Higher 2C` categories respectively). Having scenarios categorized as `1.5C` but with a higher-than-50% probability of exceeding 2°C at some point in the century may need to be considered separately in subsequent assessment.
#%%
cats_15 = ['Below 1.5C', '1.5C low overshoot', '1.5C high overshoot']
specs['cats_15'] = cats_15
#%%
cats_15_no_lo = ['Below 1.5C', '1.5C low overshoot']
specs['cats_15_no_lo'] = cats_15_no_lo
#%%
cats_2 = ['Lower 2C', 'Higher 2C']
specs['cats_2'] = cats_2
#%%
df = sr1p5.filter(exclude=False, category=cats_15, variable=warming_exccedance_prob(2.0)).timeseries()
ex_prob_2 = df.apply(lambda x: max(x))
#%%
if max(ex_prob_2) > 0.34:
logger.warning('The following 1.5C-scenarios are not `Lower 2C` scenarios:')
display(df[df.apply(lambda x: max(x), axis=1) > 0.34])
#%%
if max(ex_prob_2) > 0.50:
logger.warning('The following 1.5C-scenarios are not `2C` scenarios:')
display(df[df.apply(lambda x: max(x), axis=1) > 0.50])
#%% [markdown]
# ### Counting and evaluation of scenario assignment categories
#
# Count the number of scenarios assigned to each category.
#
# This table is the basis for **Tables 2.1 and 2.A.11** in the SR1.5.
#%%
lst = sr1p5.meta.groupby(['category', 'subcategory']).count()
(
lst
.reindex(all_cats, axis='index', level=0)
.reindex(all_subcats, axis='index', level=1)
.rename(columns={'exclude': 'count'})
)
#%% [markdown]
# Check whether any scenarios are still marked as `uncategorized`. This may be due to missing MAGICC postprocessing.
#%%
if any(sr1p5['category'] == 'uncategorized'):
    logger.warning('There are scenarios that are not yet categorized!')
display(sr1p5.filter(category='uncategorized').meta)
#%% [markdown]
# ## Validation of Kyoto GHG emissions range (SAR-GWP100)
#
# Validate for all scenarios whether aggregate Kyoto gases are outside the range as assessed by the Second Assessment Report (SAR) using the Global Warming Potential over 100 years (GWP100). These scenarios are excluded from some figures and tables in the assessment.
#%%
invalid_sar_gwp = sr1p5.validate(criteria={'Emissions|Kyoto Gases (SAR-GWP100)':
{'lo': 44500, 'up': 53500, 'year':2010}}, exclude_on_fail=False)
#%%
name='Kyoto-GHG|2010 (SAR)'
sr1p5.set_meta(meta='in range', name=name)
sr1p5.set_meta(meta='exclude', name=name, index=invalid_sar_gwp)
meta_docs[name] = 'Indicator whether the 2010 Kyoto-GHG emissions reported by the scenario (as assessed by IPCC SAR) are within the valid range'
#%% [markdown]
# ## Assignment of baseline scenarios
#
# This section assigns a `baseline` reference for scenarios from selected model intercomparison projects and individual submissions.
#%%
def set_baseline_reference(x):
m, s = (x.name[0], x.name[1])
b = None
if s.startswith('SSP') and not 'Baseline' in s:
b = '{}Baseline'.format(s[0:5])
if s.startswith('CD-LINKS') and not 'NoPolicy' in s:
b = '{}NoPolicy'.format(s[0:9])
if s.startswith('EMF33') and not 'Baseline' in s:
b = '{}Baseline'.format(s[0:6])
if s.startswith('ADVANCE') and not 'NoPolicy' in s:
b = '{}NoPolicy'.format(s[0:8])
if s.startswith('GEA') and not 'base' in s:
b = '{}base'.format(s[0:8])
if s.startswith('TERL') and not 'Baseline' in s:
b = s.replace('15D', 'Baseline').replace('2D', 'Baseline')
if s.startswith('SFCM') and not 'Baseline' in s:
b = s.replace('1p5Degree', 'Baseline').replace('2Degree', 'Baseline')
if s.startswith('CEMICS') and not s == 'CEMICS-Ref':
b = 'CEMICS-Ref'
if s.startswith('SMP') and not 'REF' in s:
if s.endswith('Def') or s.endswith('regul'):
b = 'SMP_REF_Def'
else:
b = 'SMP_REF_Sust'
if s.startswith('DAC'):
b = 'BAU'
# check that baseline scenario exists for specific model `m`
if (m, b) in sr1p5.meta.index:
return b
# else (or if scenario name not in list above), return None
return None
#%%
name = 'baseline'
sr1p5.set_meta(sr1p5.meta.apply(set_baseline_reference, raw=True, axis=1), name)
meta_docs[name] = 'Name of the respective baseline (or reference/no-policy) scenario'
#%% [markdown]
# ## Assignment of marker scenarios
#
# The following scenarios are used as markers throughout the analysis and visualization, cf. Figure 2.7 (SOD):
#
# |**Marker** |**Model & scenario name** |**Reference** | **Symbol** |
# |------------|--------------------------------|-------------------------------|-----------------|
# | *S1* | AIM/CGE 2.0 / SSP1-19 | Fujimori et al., 2017 | `white square` |
# | *S2* | MESSAGE-GLOBIOM 1.0 / SSP2-19 | Fricko et al., 2017 | `yellow square` |
# | *S5* | REMIND-MAgPIE 1.5 / SSP5-19 | Kriegler et al., 2017 | `black square` |
# | *LED* | MESSAGEix-GLOBIOM 1.0 / LowEnergyDemand | Grubler et al., 2018 | `white circle` |
#%%
dct = {'Marker':
['S1',
'S2',
'S5',
'LED'],
'Model and scenario name':
['AIM/CGE 2.0 / SSP1-19',
'MESSAGE-GLOBIOM 1.0 / SSP2-19',
'REMIND-MAgPIE 1.5 / SSP5-19',
'MESSAGEix-GLOBIOM 1.0 / LowEnergyDemand'],
'Reference':
['Fujimori et al., 2017',
'Fricko et al., 2017',
'Kriegler et al., 2017',
'Grubler et al., 2018'],
'Symbol':
['white square',
'yellow square',
'black square',
'white circle']
}
cols = ['Marker', 'Model and scenario name', 'Reference', 'Symbol']
markers_doc = pd.DataFrame(dct)[cols]
meta_tables['marker scenarios'] = markers_doc
meta_docs['marker'] = 'Illustrative pathways (marker scenarios)'
#%%
specs['marker'] = ['S1', 'S2', 'S5', 'LED']
#%%
sr1p5.set_meta('', 'marker')
rc.update({'marker': {'marker': {'': None}}})
#%%
m = 'S1'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='AIM/CGE 2.0', scenario='SSP1-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'white'}},
'edgecolors': {'marker': {m: 'black'}}}
)
#%%
m = 'S2'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='MESSAGE-GLOBIOM 1.0', scenario='SSP2-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'yellow'}},
'edgecolors': {'marker': {m: 'black'}}})
#%%
m = 'S5'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='REMIND-MAgPIE 1.5', scenario='SSP5-19'))
rc.update({'marker': {'marker': {m: 's'}},
'c': {'marker': {m: 'black'}},
'edgecolors': {'marker': {m: 'black'}}})
#%%
m = 'LED'
sr1p5.set_meta(m, 'marker',
sr1p5.filter(model='MESSAGEix-GLOBIOM 1.0', scenario='LowEnergyDemand'))
rc.update({'marker': {'marker': {m: 'o'}},
'c': {'marker': {m: 'white'}},
'edgecolors': {'marker': {m: 'black'}}})
#%% [markdown]
# ## Visual analysis of emission and temperature pathways by category
#
# First, we plot all carbon dioxide emissions trajectories colored by category, followed by the CO2 emissions from the AFOLU sector. Then, we show the warming trajectories by category.
#%%
horizon = list(range(2000, 2020, 5)) + list(range(2020, 2101, 10))
df = sr1p5.filter(year=horizon)
#%%
df.filter(exclude=False, variable='Emissions|CO2').line_plot(**plotting_args, marker='marker')
#%%
df.filter(exclude=False, variable='Emissions|CO2|AFOLU').line_plot(**plotting_args, marker='marker')
#%%
df.filter(exclude=False, variable=expected_warming).line_plot(**plotting_args, marker='marker')
#%% [markdown]
# ## Import scientific references and publication status
# The following block reads in an Excel table with the details of the scientific references for each scenario.
#
# The main cell of this section loops over all entries in this Excel table, filters for the relevant scenarios,
# and assigns a short reference and the publication status. If multiple references are relevant for a scenario, the references are compiled, and the 'highest' publication status is written to the metadata.
#%%
ref_cols = ['project', 'model', 'scenario', 'reference', 'doi', 'bibliography']
#%%
sr1p5.set_meta('undefined', 'reference')
sr1p5.set_meta('unknown', 'project')
#%%
refs = pd.read_csv('../bibliography/scenario_references.csv', encoding='iso-8859-1')
_refs = {'index': []}
for i in ref_cols:
_refs.update({i.title(): []})
#%%
for cols in refs.iterrows():
c = cols[1]
filters = {}
# check that filters are defined
if c.model is np.NaN and c.scenario is np.NaN:
logger.warn('project `{}` on line {} has no filters assigned'
.format(c.project, cols[0]))
continue
# filter for scenarios to apply the project and publication tags
filters = {}
for i in ['model', 'scenario']:
if c[i] is not np.NaN:
if ";" in c[i]:
filters.update({i: re.sub(";", "", c[i]).split()})
else:
filters.update({i: c[i]})
df = sr1p5.filter(**filters)
if df.scenarios().empty:
logger.warn('no scenarios satisfy filters for project `{}` on line {} ({})'
.format(c.project, cols[0], filters))
continue
# write to meta-tables dictionary
_refs['index'].append(cols[0])
for i in ref_cols:
_refs[i.title()].append(c[i])
sr1p5.meta.loc[df.meta.index, 'project'] = c['project']
for i in df.meta.index:
r = c['reference']
sr1p5.meta.loc[i, 'reference'] = r if sr1p5.meta.loc[i, 'reference'] == 'undefined' else '{}; {}'.format(sr1p5.meta.loc[i, 'reference'], r)
#%%
cols = [i.title() for i in ref_cols]
meta_tables['references'] = pd.DataFrame(_refs)[cols]
meta_docs['reference'] = 'Scientific references'
meta_docs['project'] = 'Project identifier contributing the scenario'
#%% [markdown]
# ## Peak warming and indicator of median global warming peak-and-decline
#
# Determine peak warming (relative to pre-industrial temperature) and end-of-century warming
# and add this to the scenario metadata.
# Then, compute the "peak-and-decline" indicator as the difference between peak warming and warming in 2100.
#%%
def peak_warming(x, return_year=False):
peak = x[x == x.max()]
if return_year:
return peak.index[0]
else:
return float(max(peak))
#%%
median_temperature = sr1p5.filter(variable=median_warming).timeseries()
#%%
name = 'median warming at peak (MAGICC6)'
sr1p5.set_meta(median_temperature.apply(peak_warming, raw=False, axis=1), name)
meta_docs[name] = 'median warming above pre-industrial temperature at peak (°C) as computed by MAGICC6'
#%%
name = 'year of peak warming (MAGICC6)'
sr1p5.set_meta(median_temperature.apply(peak_warming, return_year=True, raw=False, axis=1), name)
meta_docs[name] = 'year of peak median warming as computed by MAGICC6'
#%%
name = 'median warming in 2100 (MAGICC6)'
sr1p5.set_meta(median_temperature[2100], name)
meta_docs[name] = 'median warming above pre-industrial temperature in 2100 (°C) as computed by MAGICC6'
#%%
name = 'median warming peak-and-decline (MAGICC6)'
peak_decline = sr1p5['median warming at peak (MAGICC6)'] - sr1p5['median warming in 2100 (MAGICC6)']
sr1p5.set_meta(peak_decline, name)
meta_docs[name] = 'median warming peak-and-decline from peak to temperature in 2100 (°C) as computed by MAGICC6'
#%% [markdown]
# ### Add mean temperature at peak from 'FAIR' model diagnostics
#%%
median_temperature_fair = sr1p5.filter(variable='AR5 climate diagnostics|Temperature|Global Mean|FAIR|MED') .timeseries()
#%%
name = 'median warming at peak (FAIR)'
sr1p5.set_meta(median_temperature_fair.apply(peak_warming, raw=False, axis=1), name)
meta_docs[name] = 'median warming above pre-industrial temperature at peak (°C) as computed by FAIR'
#%%
name = 'year of peak warming (FAIR)'
sr1p5.set_meta(median_temperature_fair.apply(peak_warming, return_year=True, raw=False, axis=1), name)
meta_docs[name] = 'year of peak median warming as computed by FAIR'
#%%
fig, ax = plt.subplots()
sr1p5.filter(category=cats).scatter(ax=ax,
x='median warming at peak (MAGICC6)',
y='median warming at peak (FAIR)', color='category')
ax.plot(ax.get_xlim(), ax.get_xlim())
#%%
import matplotlib
matplotlib.__version__
#%%
fig, ax = plt.subplots()
sr1p5.scatter(ax=ax, x='year of peak warming (MAGICC6)', y='year of peak warming (FAIR)', color='category')
ax.plot(ax.get_xlim(), ax.get_xlim())
#%% [markdown]
# ## Computation of threshold exceedance year and 'overshoot' year count
#
# Determine the year when a scenario exceeds a specific temperature threshold,
# and for how many years the threshold is exceeded.
#
# This section uses the function ``exceedance()`` to determine the exceedance and return years.
# The function ``overshoot_severity()`` computes the cumulative exceedance of the 1.5°C threshold
# (i.e., the sum of temperature-years above the threshold).
#%%
def exceedance(temperature, years, threshold):
exceedance_yr = None
return_yr = None
overshoot_yr_count = None
prev_temp = 0
prev_yr = None
for yr, curr_temp in zip(years, temperature):
if np.isnan(curr_temp):
continue
if exceedance_yr is None and curr_temp > threshold:
x = (curr_temp - prev_temp) / (yr - prev_yr) # temperature change per year
exceedance_yr = prev_yr + int((threshold - prev_temp) / x) + 1 # add one because int() rounds down
if exceedance_yr is not None and return_yr is None and curr_temp < threshold:
x = (prev_temp - curr_temp) / (yr - prev_yr) # temperature change per year
return_yr = prev_yr + int((prev_temp - threshold) / x) + 1
prev_temp = curr_temp
prev_yr = yr
if return_yr is not None and exceedance_yr is not None:
overshoot_yr_count = int(return_yr - exceedance_yr)
if exceedance_yr is not None:
exceedance_yr = int(exceedance_yr)
if return_yr is not None:
return_yr = int(return_yr)
return [exceedance_yr, return_yr, overshoot_yr_count]
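# Toy illustration of the interpolation above (assumed values, not scenario output): with median
# warming of 1.40°C in 2040 and 1.60°C in 2050, the warming trend is 0.02°C/yr, so the 1.5°C
# threshold is crossed at 2040 + int(0.10 / 0.02) + 1 = 2046.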
#%%
exceedance_meta = median_temperature.apply(exceedance, axis=1, raw=True,
years=median_temperature.columns, threshold=1.5)
#%%
name = 'exceedance year|1.5°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[0]), name)
meta_docs[name] = 'year in which the 1.5°C median warming threshold is exceeded'
name = 'return year|1.5°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[1]), name)
meta_docs[name] = 'year in which median warming returns below the 1.5°C threshold'
name = 'overshoot years|1.5°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[2]), name)
meta_docs[name] = 'number of years where 1.5°C median warming threshold is exceeded'
#%%
def overshoot_severity(x, meta):
exceedance_yr = meta.loc[x.name[0:2]]['exceedance year|1.5°C']
return_yr = meta.loc[x.name[0:2]]['return year|1.5°C'] - 1
    # do not include the year in which median temperature returns to below 1.5°C
if exceedance_yr > 0 and return_yr > 0:
return pyam.cumulative(x, exceedance_yr, return_yr) - (return_yr - exceedance_yr + 1) * 1.5
#%%
name = 'exceedance severity|1.5°C'
sr1p5.set_meta(median_temperature.apply(overshoot_severity, axis=1, raw=False, meta=sr1p5.meta), name)
meta_docs[name] = 'sum of median temperature exceeding the 1.5°C threshold'
#%%
exceedance_meta = median_temperature.apply(exceedance, axis=1, raw=True,
years=median_temperature.columns, threshold=2)
#%%
name = 'exceedance year|2.0°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[0]), name)
meta_docs[name] = 'year in which the 2.0°C median warming threshold is exceeded'
name = 'return year|2.0°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[1]), name)
meta_docs[name] = 'year in which median warming returns below the 2.0°C threshold'
name = 'overshoot years|2.0°C'
sr1p5.set_meta(exceedance_meta.apply(lambda x: x[2]), name)
meta_docs[name] = 'number of years where 2.0°C median warming threshold is exceeded'
#%% [markdown]
# ## Secondary categorization and meta-data assignment according to CO2 emissions
#%% [markdown]
# ### Defining the range for cumulative indicators and units
#
# All cumulative indicators are computed over the time horizon 2016-2100 (including the year 2100 in every summation).
#%%
baseyear = 2016
lastyear = 2100
#%%
def filter_and_convert(variable):
return (sr1p5
.filter(variable=variable)
.convert_unit({'Mt CO2/yr': ('Gt CO2/yr', 0.001)})
.timeseries()
)
unit = 'Gt CO2/yr'
cumulative_unit = 'Gt CO2'
#%%
co2 = filter_and_convert('Emissions|CO2')
#%%
name = 'minimum net CO2 emissions ({})'.format(unit)
sr1p5.set_meta(co2.apply(np.nanmin, axis=1), name)
meta_docs[name] = 'Minimum of net CO2 emissions over the century ({})'.format(unit)
#%% [markdown]
# ### Indicators from cumulative CO2 emissions over the entire century (2016-2100)
#
# Compute the total cumulative CO2 emissions for secondary categorization of scenarios.
# Cumulative CO2 emissions are a first-order proxy for global mean temperature change.
# Emissions are interpolated linearly between years. The `last_year` value is included in the summation.
#
# The function `pyam.cumulative()` defined below aggregates timeseries values from `first_year` until `last_year`,
# including both first and last year in the total. The function assumes linear interpolation for years where no values are provided.
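#
# As a toy illustration of this aggregation (assumed values, not scenario data): emissions of
# 40 Gt CO2/yr in 2016 and 30 Gt CO2/yr in 2018 are interpolated to 35 Gt CO2/yr in 2017, so
# `pyam.cumulative(x, first_year=2016, last_year=2018)` would return 40 + 35 + 30 = 105 Gt CO2.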
#%%
name = 'cumulative CO2 emissions ({}-{}, {})'.format(baseyear, lastyear, cumulative_unit)
sr1p5.set_meta(co2.apply(pyam.cumulative, raw=False, axis=1, first_year=baseyear, last_year=lastyear), name)
meta_docs[name] = 'Cumulative net CO2 emissions from {} until {} (including the last year, {})'.format(
baseyear, lastyear, cumulative_unit)
#%%
ccs = filter_and_convert('Carbon Sequestration|CCS')
#%%
cum_ccs_label = 'cumulative CCS ({}-{}, {})'.format(baseyear, lastyear, cumulative_unit)
sr1p5.set_meta(ccs.apply(pyam.cumulative, raw=False, axis=1, first_year=baseyear, last_year=lastyear), cum_ccs_label)
meta_docs[cum_ccs_label] = 'Cumulative carbon capture and sequestration from {} until {} (including the last year, {})' .format(baseyear, lastyear, cumulative_unit)
#%%
beccs = filter_and_convert('Carbon Sequestration|CCS|Biomass')
#%%
cum_beccs_label = 'cumulative BECCS ({}-{}, {})'.format(baseyear, lastyear, cumulative_unit)
sr1p5.set_meta(beccs.apply(pyam.cumulative, raw=False, axis=1, first_year=baseyear, last_year=lastyear), cum_beccs_label)
meta_docs[cum_beccs_label] = 'Cumulative carbon capture and sequestration from bioenergy from {} until {} (including the last year, {})'.format(
baseyear, lastyear, cumulative_unit)
#%% [markdown]
# Issue [#9](https://github.com/iiasa/ipcc_sr15_scenario_analysis/issues/9) requested to add the data for scenarios where timeseries data for bioenergy with CCS was not provided explicitly (and hence not captured by the computation above) but could be assessed implicitly from the CCS timeseries data.
#%%
filled_ccs = sr1p5.meta[sr1p5.meta[cum_ccs_label] == 0][cum_beccs_label]
#%%
sr1p5.set_meta(name=cum_beccs_label, meta=0, index=filled_ccs[filled_ccs.isna()].index)
#%%
seq_lu = filter_and_convert('Carbon Sequestration|Land Use')
#%%
name = 'cumulative sequestration land-use ({}-{}, {})'.format(baseyear, lastyear, cumulative_unit)
sr1p5.set_meta(seq_lu.apply(pyam.cumulative, raw=False, axis=1, first_year=baseyear, last_year=lastyear), name)
meta_docs[name] = 'Cumulative carbon sequestration from land use from {} until {} (including the last year, {})'.format(
baseyear, lastyear, cumulative_unit)
#%% [markdown]
# ### Cumulative CO2 emissions until peak warming
#%%
def get_from_meta_column(df, x, col):
val = df.meta.loc[x.name[0:2], col]
return val if val < np.inf else max(x.index)
#%%
name = 'cumulative CO2 emissions ({} to peak warming, {})'.format(baseyear, cumulative_unit)
sr1p5.set_meta(co2.apply(lambda x: pyam.cumulative(x, first_year=baseyear,
last_year=get_from_meta_column(sr1p5, x,
'year of peak warming (MAGICC6)')),
raw=False, axis=1), name)
meta_docs[name] = 'cumulative net CO2 emissions from {} until the year of peak warming as computed by MAGICC6 (including the year of peak warming, {})'.format(
baseyear, cumulative_unit)
#%%
(
sr1p5
.filter(category=cats)
.scatter(x='cumulative CO2 emissions (2016 to peak warming, {})'.format(cumulative_unit),
y='median warming at peak (MAGICC6)', color='category')
)
#%% [markdown]
# ### Cumulative CO2 emissions until net-zero of total emissions
#%%
def year_of_net_zero(data, years, threshold):
prev_val = 0
prev_yr = np.nan
for yr, val in zip(years, data):
if np.isnan(val):
continue
if val < threshold:
x = (val - prev_val) / (yr - prev_yr) # absolute change per year
return prev_yr + int((threshold - prev_val) / x) + 1 # add one because int() rounds down
prev_val = val
prev_yr = yr
return np.inf
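# Toy illustration (assumed values, not scenario output): with net emissions of 2 Gt CO2/yr in 2048
# and -3 Gt CO2/yr in 2050, the zero crossing implied by the linear interpolation above is
# 2048 + int((0 - 2) / -2.5) + 1 = 2049.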
#%%
name = 'year of netzero CO2 emissions'
sr1p5.set_meta(co2.apply(year_of_net_zero, years=co2.columns, threshold=0, axis=1), name)
meta_docs[name] = 'year in which net CO2 emissions reach zero'
#%%
name = 'cumulative CO2 emissions ({} to netzero, {})'.format(baseyear, cumulative_unit)
sr1p5.set_meta(co2.apply(lambda x: pyam.cumulative(x, first_year=baseyear,
last_year=get_from_meta_column(sr1p5, x,
'year of netzero CO2 emissions')),
raw=False, axis=1), name)
meta_docs[name] = 'cumulative net CO2 emissions from {} until the year of net-zero CO2 emissions (including that year, {})'.format(
    baseyear, cumulative_unit)
#%%
name = 'warming at netzero (MAGICC6)'
sr1p5.set_meta(median_temperature.apply(lambda x: x[get_from_meta_column(sr1p5, x,
'year of netzero CO2 emissions')],
raw=False, axis=1), name)
meta_docs[name] = 'median warming above pre-industrial temperature in the year of net-zero CO2 emissions (°C) as computed by MAGICC6'
#%%
(
sr1p5
.scatter(x='cumulative CO2 emissions (2016 to netzero, {})'.format(cumulative_unit),
y='warming at netzero (MAGICC6)', color='category')
)
#%%
fig, ax = plt.subplots()
(
sr1p5
.scatter(ax=ax, x='cumulative CO2 emissions (2016 to peak warming, {})'.format(cumulative_unit),
y='cumulative CO2 emissions (2016 to netzero, {})'.format(cumulative_unit),
color='category')
)
ax.plot(ax.get_xlim(), ax.get_xlim())
#%%
fig, ax = plt.subplots()
(
sr1p5
.scatter(ax=ax, x='median warming at peak (MAGICC6)',
y='warming at netzero (MAGICC6)', color='category')
)
x = np.linspace(*ax.get_xlim())
ax.plot(ax.get_xlim(), ax.get_xlim())
#%%
fig, ax = plt.subplots()
(
sr1p5
.scatter(ax=ax, x='median warming in 2100 (MAGICC6)',
y='warming at netzero (MAGICC6)', color='category')
)
x = np.linspace(*ax.get_xlim())
ax.plot(ax.get_xlim(), ax.get_xlim())
#%% [markdown]
# ## Categorization and meta-data assignment according to final energy demand
#
# Add a categorization column to the metadata categorization based on final energy demand at the end of the century.
#%%
horizon = list(range(2000, 2020, 5)) + list(range(2020, 2101, 10))
df = sr1p5.filter(year=horizon)
#%%
fe_df = df.filter(variable='Final Energy')
fe_df.line_plot(**plotting_args, marker='marker')
#%%
fe = fe_df.timeseries()
#%%
name = 'final energy|2100'
sr1p5.set_meta(fe[2100], name)
meta_docs[name] = 'Final energy demand at the end of the century (EJ/yr)'
#%% [markdown]
# ## Indicators on carbon price development
#
# Retrieve the carbon price timeseries and derive the following indicators:
# - carbon price in 2030, 2050, and 2100 given both in 2010$ as reported by models and as NPV
# - simple average of NPV (2030-2100)
# - annual compounded NPV (2030-2100)
# - continuously compounded NPV (2030-2100)
#
# All net present values (NPV) are given relative to the year 2020.
# They are calculated assuming a 5% discount rate.
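#
# As a worked example of the discounting step (toy value, not scenario data): a carbon price of
# 100 $/tCO2 (in 2010 US$) reported for 2030 corresponds to an NPV of 100 / 1.05**(2030 - 2020) ≈ 61 $/tCO2
# relative to the 2020 base year.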
#%%
dct = {'Indicator type':
['Price by year',
'Price by year (as NPV)',
'Average (Avg) NPV',
        'Annual compounded (AC) NPV',
'Continuously compounded (CC) NPV',
'', 'Note on NPV'
],
'Description':
['Global carbon price as reported by each scenario',
'Global carbon price as reported by each scenario discounted to 2020 NPV',
'Cumulative NPV carbon price from 2030 until 2100 divided by number of years (71)',
'Annual compounded NPV carbon price from 2030 until 2100 divided by number of years (71)',
'Continuously compounded NPV carbon price from 2030 until 2100 divided by number of years (71)',
'', 'All NPV indicators are discounted to 2020 using a 5% discount rate'
]
}
meta_tables['carbon_price'] = pd.DataFrame(dct)
import numpy as np
import anndata as ad
import pandas as pd
def load_met_noimput(matrix_file, path='', save=False):
"""
    Read the raw count matrix and convert it into an AnnData object.
    Write the matrix to disk as .h5ad (AnnData object) if save=True.
    Returns the AnnData object.
"""
matrix = []
cell_names = []
feature_names = []
with open(path+matrix_file) as f:
line = f.readline()[:-2].split('\t')
if line[0] == 'sample_name':
feature_names = line[1:]
else:
matrix.append(line[1:])
cell_names.append(line[0])
if matrix == []:
line = f.readline()[:-2].split('\t')
matrix.append(line[1:])
cell_names.append(line[0])
for line in f:
line = line[:-2].split('\t')
matrix.append(line[1:])
cell_names.append(line[0])
matrix = np.array(matrix)
if feature_names != []:
adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names), var=pd.DataFrame(index=feature_names))
else:
adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names))
adata.uns['omic'] = 'methylation'
adata.uns['imputation'] = 'no_imputation'
if save:
adata.write("".join([".".split(matrix_file)[0],'.h5ad']))
return(adata)
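# Minimal usage sketch (the file name is hypothetical; the file is expected to be a tab-separated
# matrix whose header line starts with 'sample_name'):
# adata = load_met_noimput('methylation_counts.tsv', path='data/', save=False)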
def imputation_met(adata, number_cell_covered=10, imputation_value='mean', save=None, copy=False):
"""
Impute missing values in methyaltion level matrices. The imputsation is based on the average
methylation value of the given variable.
It also filter out variables that are covered in an unsufficient number of cells in order to
reduce the feature space to meaningful variables and discard potential coverage biases.
Parameters
----------
adata: AnnData object containing 'nan'
number_cell_covered: minimum number of cells to be covered in order to retain a variable
imputation_value: imputation of the missing value can be made either on the mean or the median
Return
------
Return a new AnnData object
"""
    # This step needs to be sped up and could be multithreaded.
    # Only the mean is available for now, and filtering considers only the minimum number of cells covered,
    # not the variability of the methylation levels.
    # Also, it does not return the variable annotations and forces adding 2 values.
old_features = adata.var_names.tolist()
new_matrix = []
new_features_name = []
means = []
medians = []
feat_nb = 0
length1 = len(adata.X[0,:])
length2 = len(adata.X[:,0])
adata.obs['coverage_cells'] = [length1 - np.isnan(line).sum() for line in adata.X]
adata.obs['mean_cell_methylation'] = [np.nansum(line)/length1 for line in adata.X]
adata.var['coverage_feature'] = [length2 - np.isnan(line).sum() for line in adata.X.T]
adata.var['mean_feature_methylation'] = [np.nansum(line)/length2 for line in adata.X.T]
adata2 = adata[:, adata.var['coverage_feature']>=number_cell_covered].copy()
for index in range(len(adata2.var_names.tolist())):
adata2.X[:,index] = np.nan_to_num(adata2.X[:,index], nan=adata2.var['mean_feature_methylation'][index])
    if save is not None:
        # append the .h5ad extension only if it is not already there
        adata2.write(save if save.endswith('.h5ad') else save + '.h5ad')
if copy==False:
adata = adata2.copy()
else:
return(adata2)
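# Minimal usage sketch (threshold chosen arbitrarily): keep windows covered in at least 30 cells,
# impute the remaining missing values with the feature means and return a new AnnData object.
# adata_imputed = imputation_met(adata, number_cell_covered=30, imputation_value='mean', copy=True)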
def readandimputematrix(file_name, min_coverage=1):
"""
    Temporary function to load and impute a methylation count matrix into an AnnData object
Parameters
----------
file_name : file name to read and load
min_coverage : minimum number of cells covered for which we keep and impute a variable
Returns
-------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
"""
with open(file_name) as f:
file = f.readlines()
# separate annotation from data
head_var = file[0]
head_var = head_var.split('\t')
# Then, extract the sample names
sample_names = []
data_raw = []
for l in file[1:]:
l = l.split('\t')
sample_names.append(l[0])
data_raw.append(l[1:])
# clear memory of useless variables
del file
##########################################
# now, removing empty columns
empties = []
partial = []
full = []
for index in range(1, len(data_raw[0])):
column = [element[index] for element in data_raw]
if len(list(set(column))) == 1:
empties.append(index)
elif len(list(set(column))) <= min_coverage:
partial.append(index)
else:
full.append(index)
##########################################
intermed_matrix = []
name_windows_covered = []
    # let's remove the completely uninformative columns
for index in range(1, len(head_var[1:])):
if index in full:
intermed_matrix.append([element[index] for element in data_raw])
name_windows_covered.append(head_var[index])
########################################
# imputing values.
imputed_matrix = []
for row in intermed_matrix:
imputed_row = []
if "nan" in row:
mean = np.mean([float(e) for e in row if e != "nan"])
for element in row:
if element == "nan":
imputed_row.append(str(mean))
else:
imputed_row.append(element)
imputed_matrix.append(imputed_row)
else:
imputed_matrix.append(row)
imputed_matrix = np.matrix(imputed_matrix).transpose()
    return(ad.AnnData(imputed_matrix, obs=pd.DataFrame(index=sample_names), var=pd.DataFrame(index=name_windows_covered)))
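# Minimal usage sketch (the file name is hypothetical):
# adata = readandimputematrix('methylation_counts.tsv', min_coverage=5)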
from __future__ import division
import configparser
import logging
import os
import re
import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import scipy.interpolate as itp
from joblib import Parallel
from joblib import delayed
from matplotlib import pyplot as plt
from pyplanscoring.core.dicomparser import ScoringDicomParser
from pyplanscoring.core.dosimetric import read_scoring_criteria, constrains, Competition2016
from pyplanscoring.core.dvhcalculation import Structure, prepare_dvh_data, calc_dvhs_upsampled, save_dicom_dvhs, load
from pyplanscoring.core.dvhdoses import get_dvh_max
from pyplanscoring.core.geometry import get_axis_grid, get_interpolated_structure_planes
from pyplanscoring.core.scoring import DVHMetrics, Scoring, Participant
# TODO extract constrains from analytical curves
class CurveCompare(object):
"""
    Statistical analysis of the DVH volume (%) error histograms. Volume (cm³) differences
    (numerical - analytical) were calculated for points on the DVH curve sampled at every 10 cGy,
    then normalized to the structure's total volume (cm³) to give the error in volume (%).
"""
def __init__(self, a_dose, a_dvh, calc_dose, calc_dvh, structure_name='', dose_grid='', gradient=''):
self.calc_data = ''
self.ref_data = ''
self.a_dose = a_dose
self.a_dvh = a_dvh
self.cal_dose = calc_dose
self.calc_dvh = calc_dvh
self.sampling_size = 10/100.0
self.dose_samples = np.arange(0, len(calc_dvh)/100, self.sampling_size) # The DVH curve sampled at every 10 cGy
self.ref_dvh = itp.interp1d(a_dose, a_dvh, fill_value='extrapolate')
self.calc_dvh = itp.interp1d(calc_dose, calc_dvh, fill_value='extrapolate')
self.delta_dvh = self.calc_dvh(self.dose_samples) - self.ref_dvh(self.dose_samples)
self.delta_dvh_pp = (self.delta_dvh / a_dvh[0]) * 100
# prepare data dict
# self.calc_dvh_dict = _prepare_dvh_data(self.dose_samples, self.calc_dvh(self.dose_samples))
# self.ref_dvh_dict = _prepare_dvh_data(self.dose_samples, self.ref_dvh(self.dose_samples))
# title data
self.structure_name = structure_name
self.dose_grid = dose_grid
self.gradient = gradient
def stats(self):
df = | pd.DataFrame(self.delta_dvh_pp, columns=['delta_pp']) | pandas.DataFrame |
import gc
import warnings
import numpy as np
import pandas as pd
warnings.simplefilter(action='ignore', category=FutureWarning)
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
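
# Usage sketch (illustrative only, not part of the original kernel): one_hot_encoder dummy-encodes
# every object-dtype column and reports the newly created column names. The tiny frame below is
# made up for demonstration; new_cols holds one dummy column per category plus a NaN indicator.
def _example_one_hot_encoder():
    demo = pd.DataFrame({'NAME_CONTRACT_TYPE': ['Cash loans', 'Revolving loans', None],
                         'AMT_CREDIT': [1000.0, 2000.0, 1500.0]})
    encoded, new_cols = one_hot_encoder(demo, nan_as_category=True)
    return encoded, new_cols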
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows=None, nan_as_category=False):
# Read data and merge
df = pd.read_csv('data/application_train.csv', nrows=num_rows)
test_df = pd.read_csv('data/application_test.csv', nrows=num_rows)
print("Train samples: {}, test samples: {}".format(len(df), len(test_df)))
df = df.append(test_df).reset_index()
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]
live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]
    # NaN values for DAYS_EMPLOYED: 365243 -> nan
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']
df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)
df['NEW_DOC_IND_STD'] = df[docs].std(axis=1)
df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)
df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)
df['NEW_LIVE_IND_STD'] = df[live].std(axis=1)
df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)
df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])
df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)
df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])
df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']
df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)
df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)
df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())
df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']
df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']
df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
# Categorical features with Binary encode (0 or 1; two categories)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
        df[bin_feature], uniques = pd.factorize(df[bin_feature])
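    # Note (illustrative, not part of the original kernel): pd.factorize maps each distinct value
    # to an integer code, e.g.
    #     codes, uniques = pd.factorize(pd.Series(['M', 'F', 'M']))
    #     # codes -> array([0, 1, 0]); uniques -> Index(['M', 'F'], dtype='object')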
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # Series.dtypes is never a plain str
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
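
    # Illustrative note (not part of the original module): the INI written by frameToINI can be
    # read back with the same configparser machinery, e.g.
    #     parsed = cF.ConfigParser()
    #     parsed.read('nil.ini')                  # default outFile above
    #     columns = dict(parsed['Unknown'])       # default sectionName above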
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0)  # accumulate squared deviations from zero, not from the mean
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if meanValue == numpy.nan:
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if float(sigmaValue) is float(numpy.nan):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if float(sigmaRangeValue) is float(numpy.nan):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount": pandas.StringDtype(),
"mprLiteReads": pandas.StringDtype(),
"eccErrOnMprLiteReads": pandas.StringDtype(),
"readForwardingXpPreReadCount": pandas.StringDtype(),
"readForwardingUpPreReadCount": pandas.StringDtype(),
"readForwardingLpPreReadCount": pandas.StringDtype(),
"pweDefectCompensationCredit": pandas.StringDtype(),
"planarXorRebuildFailure": pandas.StringDtype(),
"itgXorRebuildFailure": pandas.StringDtype(),
"planarXorRebuildSuccess": pandas.StringDtype(),
"itgXorRebuildSuccess": pandas.StringDtype(),
"xorLoggingSkippedSIcBand": pandas.StringDtype(),
"xorLoggingSkippedDieOffline": pandas.StringDtype(),
"xorLoggingSkippedDieAbsent": pandas.StringDtype(),
"xorLoggingSkippedBandErased": pandas.StringDtype(),
"xorLoggingSkippedNoEntry": pandas.StringDtype(),
"xorAuditSuccess": pandas.StringDtype(),
"maxSuspendCount": pandas.StringDtype(),
"suspendLimitPerPrgm": pandas.StringDtype(),
"psrCountStats": pandas.StringDtype(),
"readNandBuffCount": pandas.StringDtype(),
"readNandBufferRspErrorCount": pandas.StringDtype(),
"ddpNandWrites": pandas.StringDtype(),
"totalDeallocatedSectorsInCore": pandas.StringDtype(),
"prefetchHostReads": pandas.StringDtype(),
"hostReadtoDSMDCount": pandas.StringDtype(),
"hostWritetoDSMDCount": pandas.StringDtype(),
"snapReads4k": pandas.StringDtype(),
"snapReads8k": pandas.StringDtype(),
"snapReads16k": pandas.StringDtype(),
"xorLoggingTriggered": pandas.StringDtype(),
"xorLoggingAborted": pandas.StringDtype(),
"xorLoggingSkippedHistory": pandas.StringDtype(),
"deckDisturbRelocationUD": pandas.StringDtype(),
"deckDisturbRelocationMD": pandas.StringDtype(),
"deckDisturbRelocationLD": pandas.StringDtype(),
"bbdProactiveReadRetry": pandas.StringDtype(),
"statsRestoreRequired": pandas.StringDtype(),
"statsAESCount": pandas.StringDtype(),
"statsHESCount": pandas.StringDtype(),
"psrCountStats1": pandas.StringDtype(),
"psrCountStats2": pandas.StringDtype(),
"psrCountStats3": pandas.StringDtype(),
"psrCountStats4": pandas.StringDtype(),
"psrCountStats5": pandas.StringDtype(),
"psrCountStats6": pandas.StringDtype(),
"psrCountStats7": pandas.StringDtype(),
"psrCountStats8": pandas.StringDtype(),
"psrCountStats9": pandas.StringDtype(),
"psrCountStats10": pandas.StringDtype(),
"psrCountStats11": pandas.StringDtype(),
"psrCountStats12": pandas.StringDtype(),
"psrCountStats13": pandas.StringDtype(),
"psrCountStats14": pandas.StringDtype(),
"psrCountStats15": pandas.StringDtype(),
"psrCountStats16": pandas.StringDtype(),
"psrCountStats17": pandas.StringDtype(),
"psrCountStats18": pandas.StringDtype(),
"psrCountStats19": pandas.StringDtype(),
"psrCountStats20": pandas.StringDtype(),
"psrCountStats21": pandas.StringDtype(),
"psrCountStats22": pandas.StringDtype(),
"psrCountStats23": pandas.StringDtype(),
"psrCountStats24": pandas.StringDtype(),
"psrCountStats25": pandas.StringDtype(),
"psrCountStats26": pandas.StringDtype(),
"psrCountStats27": pandas.StringDtype(),
"psrCountStats28": pandas.StringDtype(),
"psrCountStats29": pandas.StringDtype(),
"psrCountStats30": pandas.StringDtype(),
"psrCountStats31": pandas.StringDtype(),
"psrCountStats32": pandas.StringDtype(),
"psrCountStats33": pandas.StringDtype(),
"psrCountStats34": pandas.StringDtype(),
"psrCountStats35": pandas.StringDtype(),
"psrCountStats36": pandas.StringDtype(),
"psrCountStats37": | pandas.StringDtype() | pandas.StringDtype |
"""Miscellaneous internal PyJanitor helper functions."""
import functools
import os
import sys
import warnings
from typing import Callable, Dict, List, Union
import numpy as np
import pandas as pd
from .errors import JanitorError
def check(varname: str, value, expected_types: list):
"""
One-liner syntactic sugar for checking types.
Should be used like this::
check('x', x, [int, float])
:param varname: The name of the variable.
:param value: The value of the varname.
:param expected_types: The types we expect the item to be.
:returns: TypeError if data is not the expected type.
"""
is_expected_type = False
for t in expected_types:
if isinstance(value, t):
is_expected_type = True
break
if not is_expected_type:
raise TypeError(
"{varname} should be one of {expected_types}".format(
varname=varname, expected_types=expected_types
)
)
def _clean_accounting_column(x: str) -> float:
"""
Perform the logic for the `cleaning_style == "accounting"` attribute.
This is a private function, not intended to be used outside of
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method.
:returns: An object with a cleaned column.
"""
y = x.strip()
y = y.replace(",", "")
y = y.replace(")", "")
y = y.replace("(", "-")
if y == "-":
return 0.00
return float(y)
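
# Usage sketch (illustrative only, not part of the library's public API): applied element-wise,
# _clean_accounting_column turns accounting-style strings into floats.
def _example_clean_accounting_column():
    demo = pd.Series(["1,000.00", "(500.00)", "-"])
    return demo.apply(_clean_accounting_column)  # -> [1000.0, -500.0, 0.0]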
def _currency_column_to_numeric(x, cast_non_numeric=None) -> str:
"""
Perform logic for changing cell values.
This is a private function intended to be used only in
``currency_column_to_numeric``.
It is intended to be used in a pandas `apply` method, after being passed
through `partial`.
"""
acceptable_currency_characters = {
"-",
".",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"0",
}
if len(x) == 0:
return "ORIGINAL_NA"
if cast_non_numeric:
if x in cast_non_numeric.keys():
check(
"{%r: %r}" % (x, str(cast_non_numeric[x])),
cast_non_numeric[x],
[int, float],
)
return cast_non_numeric[x]
else:
return "".join(i for i in x if i in acceptable_currency_characters)
else:
return "".join(i for i in x if i in acceptable_currency_characters)
def _replace_empty_string_with_none(column_series):
column_series.loc[column_series == ""] = None
return column_series
def _replace_original_empty_string_with_none(column_series):
column_series.loc[column_series == "ORIGINAL_NA"] = None
return column_series
def _strip_underscores(
df: pd.DataFrame, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""
Strip underscores from DataFrames column names.
Underscores can be stripped from the beginning, end or both.
.. code-block:: python
df = _strip_underscores(df, strip_underscores='left')
:param df: The pandas DataFrame object.
:param strip_underscores: (optional) Removes the outer underscores from all
column names. Default None keeps outer underscores. Values can be
either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'
and True.
:returns: A pandas DataFrame with underscores removed.
"""
df = df.rename(
columns=lambda x: _strip_underscores_func(x, strip_underscores)
)
return df
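
# Usage sketch (illustrative only): stripping underscores from the column names of a throwaway frame.
def _example_strip_underscores():
    demo = pd.DataFrame({"_col_a_": [1], "col_b_": [2]})
    stripped = _strip_underscores(demo, strip_underscores="both")
    return stripped.columns.tolist()  # -> ['col_a', 'col_b']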
def _strip_underscores_func(
col: str, strip_underscores: Union[str, bool] = None
) -> pd.DataFrame:
"""Strip underscores from a string."""
underscore_options = [None, "left", "right", "both", "l", "r", True]
if strip_underscores not in underscore_options:
raise JanitorError(
f"strip_underscores must be one of: {underscore_options}"
)
if strip_underscores in ["left", "l"]:
col = col.lstrip("_")
elif strip_underscores in ["right", "r"]:
col = col.rstrip("_")
elif strip_underscores == "both" or strip_underscores is True:
col = col.strip("_")
return col
def import_message(
submodule: str,
package: str,
conda_channel: str = None,
pip_install: bool = False,
):
"""
Return warning if package is not found.
Generic message for indicating to the user when a function relies on an
optional module / package that is not currently installed. Includes
installation instructions. Used in `chemistry.py` and `biology.py`.
:param submodule: pyjanitor submodule that needs an external dependency.
:param package: External package this submodule relies on.
:param conda_channel: Conda channel package can be installed from,
if at all.
:param pip_install: Whether package can be installed via pip.
"""
is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta"))
installable = True
if is_conda:
if conda_channel is None:
installable = False
installation = f"{package} cannot be installed via conda"
else:
installation = f"conda install -c {conda_channel} {package}"
else:
if pip_install:
installation = f"pip install {package}"
else:
installable = False
installation = f"{package} cannot be installed via pip"
print(
f"To use the janitor submodule {submodule}, you need to install "
f"{package}."
)
print()
if installable:
print("To do so, use the following command:")
print()
print(f" {installation}")
else:
print(f"{installation}")
def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs):
"""
Raises error if a function operating on a `DataFrame` is not idempotent,
that is, `func(func(df)) = func(df)` is not true for all `df`.
:param func: A python method.
:param df: A pandas `DataFrame`.
:param args: Positional arguments supplied to the method.
:param kwargs: Keyword arguments supplied to the method.
:raises ValueError: If `func` is found to not be idempotent for the given
`DataFrame` `df`.
"""
if not func(df, *args, **kwargs) == func(
func(df, *args, **kwargs), *args, **kwargs
):
raise ValueError(
"Supplied function is not idempotent for the given " "DataFrame."
)
def deprecated_alias(**aliases) -> Callable:
"""
Used as a decorator when deprecating old function argument names, while
keeping backwards compatibility.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
Functional usage example:
.. code-block:: python
@deprecated_alias(a='alpha', b='beta')
def simple_sum(alpha, beta):
return alpha + beta
:param aliases: Dictionary of aliases for a function's arguments.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rename_kwargs(func.__name__, kwargs, aliases)
return func(*args, **kwargs)
return wrapper
return decorator
def refactored_function(message: str) -> Callable:
"""Used as a decorator when refactoring functions
Implementation is inspired from `Hacker Noon`_.
    .. _Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987
Functional usage example:
.. code-block:: python
@refactored_function(
message="simple_sum() has been refactored. Use hard_sum() instead."
)
def simple_sum(alpha, beta):
return alpha + beta
:param message: Message to use in warning user about refactoring.
:return: Your original function wrapped with the kwarg redirection
function.
""" # noqa: E501
def decorator(func):
def emit_warning(*args, **kwargs):
warnings.warn(message, FutureWarning)
return func(*args, **kwargs)
return emit_warning
return decorator
def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict):
"""
Used to update deprecated argument names with new names. Throws a
TypeError if both arguments are provided, and warns if old alias is used.
Implementation is inspired from `StackOverflow`_.
.. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
:param func_name: name of decorated function.
:param kwargs: Arguments supplied to the method.
:param aliases: Dictionary of aliases for a function's arguments.
:return: Nothing; the passed `kwargs` are modified directly.
""" # noqa: E501
for old_alias, new_alias in aliases.items():
if old_alias in kwargs:
if new_alias in kwargs:
raise TypeError(
f"{func_name} received both {old_alias} and {new_alias}"
)
warnings.warn(
f"{old_alias} is deprecated; use {new_alias}",
DeprecationWarning,
)
kwargs[new_alias] = kwargs.pop(old_alias)
def check_column(
df: pd.DataFrame, old_column_names: List, present: bool = True
):
"""
One-liner syntactic sugar for checking the presence or absence of a column.
Should be used like this::
check(df, ['a', 'b'], present=True)
:param df: The name of the variable.
:param old_column_names: A list of column names we want to check to see if
present (or absent) in df.
:param present: If True (default), checks to see if all of old_column_names
are in df.columns. If False, checks that none of old_column_names are
in df.columns.
:returns: ValueError if data is not the expected type.
"""
for column_name in old_column_names:
if present:
if column_name not in df.columns:
raise ValueError(
f"{column_name} not present in dataframe columns!"
)
else: # Tests for exclusion
if column_name in df.columns:
raise ValueError(
f"{column_name} already present in dataframe columns!"
)
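
# Usage sketch (illustrative only): check_column raises ValueError when an expected column is
# missing (present=True) or when a column that must be absent is found (present=False).
def _example_check_column():
    demo = pd.DataFrame({"a": [1], "b": [2]})
    check_column(demo, ["a", "b"], present=True)  # passes silently
    try:
        check_column(demo, ["c"], present=True)
    except ValueError as err:
        return str(err)  # 'c not present in dataframe columns!'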
def skipna(f: Callable) -> Callable:
"""
Decorator for escaping np.nan and None in a function
Should be used like this::
df[column].apply(skipna(transform))
or::
@skipna
def transform(x):
pass
:param f: the function to be wrapped
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
if (type(x) is float and np.isnan(x)) or x is None:
return np.nan
else:
return f(x, *args, **kwargs)
return _wrapped
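
# Usage sketch (illustrative only): skipna lets a transform pass NaN/None through untouched.
def _example_skipna():
    @skipna
    def shout(x):
        return x.upper()
    demo = pd.Series(["a", None, "b"])
    return demo.apply(shout).tolist()  # -> ['A', nan, 'B']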
def skiperror(
f: Callable, return_x: bool = False, return_val=np.nan
) -> Callable:
"""
Decorator for escaping errors in a function
Should be used like this::
df[column].apply(
skiperror(transform, return_val=3, return_x=False))
or::
@skiperror(return_val=3, return_x=False)
def transform(x):
pass
:param f: the function to be wrapped
:param return_x: whether or not the original value that caused error
should be returned
:param return_val: the value to be returned when an error hits.
Ignored if return_x is True
:returns: _wrapped, the wrapped function
"""
def _wrapped(x, *args, **kwargs):
try:
return f(x, *args, **kwargs)
except Exception:
if return_x:
return x
return return_val
return _wrapped
def _check_instance(entry: Dict):
"""
Function to check instances in the expand_grid function.
This checks if entry is a dictionary,
checks the instance of value in key:value pairs in entry,
and makes changes to other types as deemed necessary.
Additionally, type-specific errors are raised
if unsupported data types are passed in as values
in the entry dictionary.
How each type is handled, and their associated exceptions,
are pretty clear from the code.
"""
# dictionary should not be empty
if not entry:
raise ValueError("passed dictionary cannot be empty")
# If it is a NoneType, number, Boolean, or string,
# then wrap in a list
entry = {
key: [value]
if isinstance(value, (type(None), int, float, bool, str))
else value
for key, value in entry.items()
}
# Convert to list if value is a set|tuple|range
entry = {
key: list(value) if isinstance(value, (set, tuple, range)) else value
for key, value in entry.items()
}
# collect dataframes here
dfs = []
# collect non dataframes here, proper dicts
dicts = {}
for key, value in entry.items():
# exclude dicts:
if isinstance(value, dict):
raise TypeError("Nested dictionaries are not allowed")
# process arrays
if isinstance(value, np.ndarray):
if value.size == 0:
raise ValueError("array cannot be empty")
if value.ndim == 1:
dfs.append(pd.DataFrame(value, columns=[key]))
elif value.ndim == 2:
                dfs.append(pd.DataFrame(value))
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
        assert_series_equal(result, s.ix[:60])
import numpy as np
import pandas as pd
from sklearn import preprocessing
from keras.layers.core import Dense, Dropout, Activation
from keras.activations import linear
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from matplotlib import pyplot
#read and prepare data from datafile
data_file_name = "DailyDemandDataFactors.csv"
data_csv = pd.read_csv(data_file_name, delimiter = ';',header=None, usecols=[3,4,5,6,7,8,9,10,11,12,13,14])
yt = data_csv[1:]
data = yt
data.columns = ['SumRetrait','CountTransaction','ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP','ConsoMMJrSmDer','MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer']
# print (data.head(10))
pd.options.display.float_format = '{:,.0f}'.format
data = data.dropna ()
y=data['SumRetrait'].astype(int)
cols=['CountTransaction','ConsommationHier','MSemaineDernier','MSemaine7','ConsoMmJrAnP','ConsoMmJrMP','ConsoMMJrSmDer','MoyenneMoisPrec','MoyenneMMSAnPrec','MoyenneMMmAnPrec','ConsommationMaxMDer']
x=data[cols].astype(float)
print("longeur de y",len(y))
print(x.head())
print(y.head())
#scaling data
scaler_x = preprocessing.MinMaxScaler(feature_range =(-1, 1))
x = np.array(x).reshape ((len(x),11 ))
x = scaler_x.fit_transform(x)
scaler_y = preprocessing.MinMaxScaler(feature_range =(-1, 1))
y = np.array(y).reshape ((len(y), 1))
y = scaler_y.fit_transform(y)
# Split train and test data
train_end = 80
x_train=x[0: train_end ,]
x_test=x[train_end +1: ,]
y_train=y[0: train_end]
y_test=y[train_end +1:]
x_train=x_train.reshape(x_train.shape +(1,))
x_test=x_test.reshape(x_test.shape + (1,))
print("Data well prepared")
print ('x_train shape ', x_train.shape)
print ('y_train', y_train.shape)
#Design the model - LSTM Network
seed = 2016
np.random.seed(seed)
fit1 = Sequential ()
fit1.add(LSTM(
output_dim = 6,
activation='tanh',
input_shape =(11, 1)))
fit1.add(Dropout(0.01))
fit1.add(Dense(output_dim =1))
fit1.add(Activation(linear))
#rmsprop or sgd
batchsize = 7
fit1.compile(loss="mean_squared_error",optimizer="rmsprop")
#train the model
fit1.fit(x_train , y_train , batch_size = batchsize, nb_epoch =200, shuffle=False)
print(fit1.summary ())
#Model error
score_train = fit1.evaluate(x_train ,y_train ,batch_size =batchsize)
score_test = fit1.evaluate(x_test , y_test ,batch_size =batchsize)
print("in train MSE = ",round(score_train,4))
print("in test MSE = ",round(score_test ,4))
#Make prediction
pred1=fit1.predict(x_test)
pred1 = scaler_y.inverse_transform(np.array(pred1).reshape ((len(pred1), 1)))
real_test = scaler_y.inverse_transform(np.array(y_test).reshape ((len(y_test), 1))).astype(int)
# rmse = np.sqrt(np.square(np.subtract(real_test, pred1)).mean())
# print('Test RMSE: %.3f' % rmse)
# serialize model to JSON
model_json = fit1.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
fit1.save_weights("model.h5")
print("Saved model to disk")
####
# FeaturesTest = [36,95900,695,676,126700,88100,122800,768,659,741,419300]
# xaa = np.array(FeaturesTest).reshape ((1,11 ))
# xaa = scaler_x.fit_transform(xaa)
# xaa=xaa.reshape(xaa.shape +(1,))
# tomorrowDemand = fit1.predict(xaa)
# prediction = scaler_y.inverse_transform(np.array(tomorrowDemand).reshape ((len(tomorrowDemand), 1))).astype(int)
# print ("la demande est: ",prediction)
####
#save prediction
testData = pd.DataFrame(real_test)
preddData = pd.DataFrame(pred1)
#!/usr/bin/env python
# PROGRAM: plot_sst.py
# ----------------------------------------------------------------------------------
# Version 0.18
# 19 August, 2019
# michael.taylor AT reading DOT ac DOT uk
# PYTHON DEBUGGER CONTROL:
#------------------------
# import os; os._exit(0)
# import ipdb
# ipdb.set_trace()
import os.path
import optparse
from optparse import OptionParser
import sys
import numpy as np
import xarray
import pandas as pd
from pandas import Series, DataFrame, Panel
import seaborn as sns; sns.set(style="darkgrid")
import datetime
import matplotlib
import matplotlib.pyplot as plt; plt.close("all")
#import typhon
#from typhon.plots import plot_bitfield
#cmap = 'tab20c' # https://matplotlib.org/users/colormaps
def calc_median(counts,bins):
"""
# -------------------------------
# CALCULATE MEDIUM FROM HISTOGRAM
# -------------------------------
# M_estimated ~ L_m + [ ( N/2 - F_{m-1} ) / f_m] * c
#
# where,
#
# L_m =lower limit of the median bar
# N = is the total number of observations
# F_{m-1} = cumulative frequency (total number of observations) in all bars below the median bar
# f_m = frequency of the median bar
# c = median bar width
"""
M = 0
counts_cumsum = counts.cumsum()
counts_half = counts_cumsum[-1]/2.0
for i in np.arange(0,bins.shape[0]-1):
counts_l = counts_cumsum[i]
counts_r = counts_cumsum[i+1]
if (counts_half >= counts_l) & (counts_half < counts_r):
c = bins[1]-bins[0]
L_m = bins[i+1]
F_m_minus_1 = counts_cumsum[i]
f_m = counts[i+1]
M = L_m + ( (counts_half - F_m_minus_1) / f_m ) * c
return M
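# Worked example for calc_median (illustrative numbers only, not CCI data): with
# counts = np.array([2, 5, 3]) and bins = np.array([0., 1., 2.]) the cumulative sum is
# [2, 7, 10], half the total is 5, the median bar starts at L_m = 1 with f_m = 5 and
# F_{m-1} = 2, so M = 1 + (5 - 2) / 5 * 1 = 1.6, i.e. calc_median(counts, bins) == 1.6.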
def plot_n_sst(times,n_sst_q3,n_sst_q4,n_sst_q5):
"""
# ---------------------------------------
# PLOT CUMULATIVE SST OBSERVATION DENSITY
# ---------------------------------------
"""
ocean_area = 361900000.0
t = np.array(times, dtype=np.datetime64)
years = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D') / 365.0
Q3 = pd.Series(n_sst_q3, index=times).fillna(0) / ocean_area / years
Q4 = pd.Series(n_sst_q4, index=times).fillna(0) / ocean_area / years
Q5 = pd.Series(n_sst_q5, index=times).fillna(0) / ocean_area / years
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.plot(times,df['QL=4 & 5'].cumsum(), drawstyle='steps')
plt.plot(times,df['QL=3'].cumsum(), drawstyle='steps')
plt.tick_params(labelsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
title_str = ' ' + 'QL=3:max=' + "{0:.5f}".format(df['QL=3'].cumsum().max()) + ' ' + 'QL=4 & 5:max=' + "{0:.5f}".format(df['QL=4 & 5'].cumsum().max())
print(title_str)
plt.legend(loc='best')
plt.savefig('n_sst.pdf')
# plt.savefig('n_sst.png', dpi=600)
# plt.savefig('n_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_lat(lat_vec,n_sst_q3_lat,n_sst_q4_lat,n_sst_q5_lat):
"""
# ------------------------------------------
# PLOT SST OBSERVATION DENSITY WITH LATITUDE
# ------------------------------------------
"""
interpolation = np.arange(-90,90,1)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q3_lat), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q4_lat), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q5_lat), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df['QL=3 & 4 & 5'] = df['QL=3'] + df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation, df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation, df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation, df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
plt.plot(interpolation, df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([-90,90])
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(-90, 90, 7))
plt.tick_params(labelsize=12)
plt.xlabel("Latitude / $\mathrm{\degree N}$", fontsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
plt.legend(loc='best')
plt.savefig('n_sst_lat.pdf')
# plt.savefig('n_sst_lat.png', dpi=600)
# plt.savefig('n_sst_lat.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sst(sst_midpoints,sst_q3_hist,sst_q4_hist,sst_q5_hist):
"""
# ------------------------------
# PLOT HISTOGRAM OF SST + MEDIAN
# ------------------------------
"""
# interpolation = np.arange(260.05,319.95,0.1) # original bin midpoints
i = np.arange(260,320,0.1) # bin edges
n = len(i)
m = 1.0
q3 = m * pd.Series(np.interp(i,sst_midpoints,sst_q3_hist), index=i)
q4 = m * pd.Series(np.interp(i,sst_midpoints,sst_q4_hist), index=i)
q5 = m * pd.Series(np.interp(i,sst_midpoints,sst_q5_hist), index=i)
dq = pd.DataFrame({'QL=3':q3, 'QL=4':q4, 'QL=5':q5})
dq['QL=4 & 5'] = 0.5 * (dq['QL=4'] + dq['QL=5'])
# dq = dq.mask(np.isinf(df))
M3 = calc_median(dq['QL=3'].values,i[0:n])
M4_5 = calc_median(dq['QL=4 & 5'].values,i[0:n])
interpolation = np.arange(260,320,1) # 10x original resolution
n = len(interpolation)
multiplier = 10.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation,df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(interpolation,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([260,310])
plt.tick_params(labelsize=12)
plt.xlabel("SST / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ K^{-1}}$", fontsize=12)
title_str = 'SST: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sst.pdf')
# plt.savefig('hist_sst.png', dpi=600)
# plt.savefig('hist_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sensitivity(sensitivity_midpoints,sensitivity_q3_hist,sensitivity_q4_hist,sensitivity_q5_hist):
"""
# ------------------------------------------------
# PLOT HISTOGRAM OF RETRIEVAL SENSITIVITY + MEDIAN
# ------------------------------------------------
"""
# interpolation = np.arange(0.005,1.995,0.01) # original bin midpoints
interpolation = np.arange(0,2,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(100.0*interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(100.0*interpolation,df['QL=3'], step="post", alpha=0.4)
plt.plot(100.0*interpolation,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(100.0*interpolation,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([85,110])
plt.tick_params(labelsize=12)
plt.xlabel("Retrieval sensitivity / $\mathrm{\%}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ {\%}^{-1} }$", fontsize=12)
title_str = 'Sensitivity: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sensitivity.pdf')
# plt.savefig('hist_sensitivity.png', dpi=600)
# plt.savefig('hist_sensitivity.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty(total_uncertainty_midpoints,total_uncertainty_q3_hist,total_uncertainty_q4_hist,total_uncertainty_q5_hist):
"""
# --------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN
# --------------------------------------------
"""
# interpolation = np.arange(0.005,3.995+0.01,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
title_str = 'Uncertainty: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty.pdf')
# plt.savefig('hist_total_uncertainty.png', dpi=600)
# plt.savefig('hist_total_uncertainty.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty2(total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr,total_uncertainty_q4_hist_avhrr,total_uncertainty_q5_hist_avhrr,total_uncertainty_q3_hist_atsr,total_uncertainty_q4_hist_atsr,total_uncertainty_q5_hist_atsr):
"""
# --------------------------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN FOR AVHRR VS ATSR
# --------------------------------------------------------------
"""
# interpolation = np.arange(0.005,3.995,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr), index=interpolation)
Q4_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_avhrr), index=interpolation)
Q5_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_avhrr), index=interpolation)
df_avhrr = pd.DataFrame({'QL=3':Q3_avhrr, 'QL=4':Q4_avhrr, 'QL=5':Q5_avhrr})
# df_avhrr['QL=4 & 5'] = 0.5 * (df_avhrr['QL=4'] + df_avhrr['QL=5'])
df_avhrr['QL=4 & 5'] = df_avhrr['QL=5']
# df_avhrr = df_avhrr.mask(np.isinf(df_avhrr))
Q3_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_atsr), index=interpolation)
Q4_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_atsr), index=interpolation)
Q5_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_atsr), index=interpolation)
df_atsr = pd.DataFrame({'QL=3':Q3_atsr, 'QL=4':Q4_atsr, 'QL=5':Q5_atsr})
df_atsr['QL=4 & 5'] = 0.5 * (df_atsr['QL=4'] + df_atsr['QL=5'])
# df_atsr = df_atsr.mask(np.isinf(df_atsr))
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df_avhrr['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_avhrr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_avhrr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'AVHRR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_avhrr.pdf')
# plt.savefig('hist_total_uncertainty_avhrr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_avhrr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df_atsr['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_atsr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_atsr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'ATSR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_atsr.pdf')
# plt.savefig('hist_total_uncertainty_atsr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_atsr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def calc_n_sst_timeseries(satellites):
"""
# ---------------------------------------------------------------
# CALC MEAN OF TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# ---------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_all = pd.DataFrame()
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_all = df_all.append(df,ignore_index=True)
satellites_avhrr = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_avhrr = pd.DataFrame()
for i in range(0,len(satellites_avhrr)):
filename = satellites_avhrr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_avhrr = df_avhrr.append(df,ignore_index=True)
satellites_atsr = ['AATSR','ATSR1','ATSR2']
df_atsr = pd.DataFrame()
for i in range(0,len(satellites_atsr)):
filename = satellites_atsr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_atsr = df_atsr.append(df,ignore_index=True)
return df_all, df_avhrr, df_atsr
def plot_n_sst_timeseries(satellites):
"""
# -------------------------------------------------------
# PLOT TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# -------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
lab = []
ncolors = len(satellites)
ax1.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
# df['Sum'] = df['Q4'].fillna(0) + df['Q5'].fillna(0)
# df['Sum_mean'] = df['Sum'].resample("1d").sum().fillna(0).rolling(window=31, min_periods=1).median()
# df['Sum_mean'].plot(ax=ax1)
lab.append(labels[i])
ax1.plot(times, df['Sum'], '.', markersize=0.2)
ax1.set_ylim([0,18])
print(labels[i] + "," + str(df['Sum'].mean()) + "," + str(df['Sum'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=4 & 5'
ax1.set_title(title_str, fontsize=10)
lab = []
ncolors = len(satellites)
ax2.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3})
# df['Q3_mean'] = df['Q3'].resample("1d").sum().rolling(window=31, min_periods=1).median()
# df['Q3_mean'].plot(ax=ax2)
lab.append(labels[i])
ax2.plot(times, df['Q3'], '.', markersize=0.2)
ax2.set_ylim([0,18])
print(labels[i] + "," + str(df['Q3'].mean()) + "," + str(df['Q3'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=3'
ax2.set_title(title_str, fontsize=10)
fig.legend(lab, fontsize=8, loc=7, markerscale=20, scatterpoints=5)
fig.subplots_adjust(right=0.8)
fig.text(0.01, 0.5, 'Observation density / $\mathrm{km^{-2} \ yr^{-1}}$', va='center', rotation='vertical')
plt.savefig('n_sst_timeseries.pdf')
# plt.savefig('n_sst_timeseries.png', dpi=600)
# plt.savefig('n_sst_timeseries.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_boxplots(satellites):
"""
# --------------------------------------------------------------
# PLOT YEARLY BOXPLOTS FROM DAILY OBSERVATION DENSITY PER SENSOR
# --------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
        Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
import unittest
import pandas as pd
import pandas.util.testing as pt
import tia.util.fmt as fmt
def tof(astr):
return float(astr.replace(",", ""))
class TestFormat(unittest.TestCase):
def ae(self, expected, fct, value, **kwargs):
cb = fct(**kwargs)
actual = cb(value)
self.assertEqual(expected, actual)
def test_default_formats(self):
B = float("-1,250,500,880.76".replace(",", ""))
M = B / 1000.0
k = M / 1000.0
p = k / 1000000.0
tests = [
(B, "$(1.3B)", fmt.BillionDollarsFormatter),
(B, "(1.3B)", fmt.BillionsFormatter),
(M, "$(1.3M)", fmt.MillionDollarsFormatter),
(M, "(1.3M)", fmt.MillionsFormatter),
(k, "$(1.3k)", fmt.ThousandDollarsFormatter),
(k, "(1.3k)", fmt.ThousandsFormatter),
(k, "(1,250.50)", fmt.FloatFormatter),
(k, "(1,251)", fmt.IntFormatter),
# Floats
(k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)),
(k, "-1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1,251", fmt.new_int_formatter(commas=1)),
(str(k), "-1,251", fmt.new_int_formatter(commas=1, coerce=True, parens=0)),
# Ints
(k, "-1,251", fmt.new_int_formatter(commas=1, parens=False)),
(k, "-1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1251", fmt.new_int_formatter(commas=0, parens=False)),
(abs(k), "1,251", fmt.new_int_formatter(commas=1)),
# Percents
(0.12433, "12.4%", fmt.new_percent_formatter(commas=1, precision=1)),
(0.12433, "12.433%", fmt.new_percent_formatter(commas=1, precision=3)),
(
-0.12433,
"-12.4%",
fmt.new_percent_formatter(commas=1, parens=0, precision=1),
),
(
-0.12433,
"(12.4%)",
fmt.new_percent_formatter(commas=1, parens=1, precision=1),
),
]
for val, expected, fct in tests:
actual = fct(val)
self.assertEqual(expected, actual)
# Test if it were a list
actual = fct([val] * 5)
self.assertEqual([expected] * 5, actual)
# Test if it were a series
actual = fct(pd.Series([val] * 5))
pt.assert_series_equal(pd.Series([expected] * 5), actual)
# Test if it were a DataFrame
actual = fct(pd.DataFrame({"a": [val] * 5, "b": [val] * 5}))
pt.assert_frame_equal(
pd.DataFrame({"a": [expected] * 5, "b": [expected] * 5}), actual
)
def test_fmt_datetime(self):
self.assertEqual(
fmt.new_datetime_formatter("%Y-%m")(pd.to_datetime("1/1/2013")), "2013-01"
)
def test_guess_formatter(self):
for n, t in (3, "k"), (6, "M"), (9, "B"):
m = 10 ** n
s = | pd.Series([2.1 * m, -20.1 * m, 200.1 * m]) | pandas.Series |
# coding: utf-8
# author: wamhanwan
"""Tushare API"""
import tushare as ts
import pandas as pd
import numpy as np
from time import sleep
from FactorLib.utils.tool_funcs import get_members_of_date
from functools import update_wrapper
_token = '6135b90bf40bb5446ef2fe7aa20a9467ad10023eda97234739743f46'
SHEXG = 'SSE' # Shanghai Stock Exchange code
SZEXG = 'SZSE' # Shenzhen Stock Exchange code
ts.set_token(_token)
pro_api = ts.pro_api()
def set_call_limit(max_times=60, sleep_seconds=60):
if isinstance(max_times, int):
if max_times < 0:
max_times = 1
else:
raise TypeError("Expected max_times to be an integer")
def decorating_function(user_function):
wrapper = _time_limit_wrapper(user_function, max_times, sleep_seconds)
return update_wrapper(wrapper, user_function)
return decorating_function
def _time_limit_wrapper(user_function, max_times, sleep_seconds):
times = 0
def wrapper(*args, **kwargs):
nonlocal times
if times == max_times:
print(f"sleep {sleep_seconds} sceonds")
sleep(sleep_seconds)
times = 0
times += 1
return user_function(*args, **kwargs)
return wrapper
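# A minimal usage sketch (not part of the original module) for the rate-limit
# decorator defined above. `fetch_quotes` and its body are hypothetical and only
# illustrate that `set_call_limit` pauses for `sleep_seconds` once `max_times`
# calls have been made.
@set_call_limit(max_times=2, sleep_seconds=1)
def fetch_quotes(ticker):
    # Stand-in for a real Tushare request; returns a placeholder string.
    return f"quotes for {ticker}"

if __name__ == '__main__':
    # The third call hits the limit and sleeps for one second before running.
    for code in ('000001', '600000', '300750'):
        print(fetch_quotes(code))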
class TushareDB(object):
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
return cls._instance
def __init__(self):
self._api = ts
self._pro_api = pro_api
self._token = _token
@classmethod
def get_instance(cls):
return TushareDB()
def run_api(self, api_name, *args, **kwargs):
return getattr(self._pro_api, api_name)(*args, **kwargs)
def format_date(self, data, column=None):
data[column] = pd.to_datetime(data[column], format='%Y%m%d')
return data
def stock_basic_get(self, list_status=None, exchange=None, is_hs=None,
                        fields=None):
        """Basic data - stock list (only the latest snapshot is available).
        Parameters:
        ----------
        list_status: str
            Listing status: L listed, D delisted, P suspended
        exchange: str
            Exchange: SHEXG Shanghai Stock Exchange, SZEXG Shenzhen Stock Exchange
        is_hs: str
            Stock Connect flag: N no, H Shanghai-HK Connect, S Shenzhen-HK Connect
        Fields:
        ------
        symbol       stock code (ticker)
        name         stock short name
        industry     industry
        list_status  listing status
        list_date    listing date
        delist_date  delisting date
        is_hs        Stock Connect flag
        """
data1 = self.run_api('stock_basic', list_status='L', exchange=exchange, is_hs=is_hs,
fields=fields)
data2 = self.run_api('stock_basic', list_status='P', exchange=exchange, is_hs=is_hs,
fields=fields)
data3 = self.run_api('stock_basic', list_status='D', exchange=exchange, is_hs=is_hs,
fields=fields)
l = [data1,data2,data3]
if list_status:
            status_index = [['L', 'P', 'D'].index(x) for x in list_status.split(',')]
return pd.concat([l[i] for i in status_index]).sort_values('symbol')
return pd.concat(l).sort_values('symbol')
    def stock_st_get(self, date):
        """A-share ST designations added or removed, effective on the given date.
        Parameters:
        ---------
        date: str
            Date, YYYYMMDD
        Returns:
        ------
        DataFrame: IDs name start_date end_date
        """
data = self.run_api('namechange', end_date=date)
data = data[data['name'].str.contains('ST')]
data = data.fillna({'end_date': '21001231'})
data = data[(data['start_date']<=date)&(data['end_date']>=date)]
data = data[~data['ts_code'].duplicated(keep='last')]
data['IDs'] = data['ts_code'].str[:6]
return data
    def stock_onlist_get(self, date):
        """List of A-share stocks that are listed (and not yet delisted) on a given date.
        Return
        ------
        DataFrame: symbol name list_date delist_date
        """
all_stocks = self.stock_basic_get(
fields='symbol,name,list_date,delist_date'
).fillna({'delist_date':'21001231'})
indices = (all_stocks['list_date']<=date)&(all_stocks['delist_date']>date)
return all_stocks[indices]
    def index_weight_get(self, index_code, date):
        """Constituent weights of an A-share index.
        Parameters:
        -----------
        index_code: str
            Index code: 399300.SZ CSI 300, 000905.SH CSI 500, 000906.SH CSI 800
        date: str
            Date, YYYYMMDD
        Returns:
        --------
        DataFrame index_code con_code trade_date weight
        """
start_date = (pd.to_datetime(date)-pd.Timedelta(days=30)).strftime('%Y%m%d')
data = self.run_api('index_weight',index_code=index_code, start_date=start_date, end_date=date)
data = data[data['trade_date']==data['trade_date'].max()]
data['index_code'] = data['index_code'].str[:6]
data['con_code'] = data['con_code'].str[:6]
data['trade_date'] = date
data['weight'] /= 100.0
return data
    def quota_get(self, start_date, end_date, ticker=None, adj=None, freq='D', ma=None, asset='E'):
        """Generic quote interface.
        Parameters:
        ----------
        ticker: str
            Security code; a single security code
        start_date: str
            Start date, YYYYMMDD
        end_date: str
            End date, YYYYMMDD
        adj: str
            Price adjustment: qfq forward-adjusted, hfq backward-adjusted
        freq: str
            Data frequency: 1/5/15/30/60min or D (daily)
        ma: int
            Moving-average window(s)
        asset: str
            Asset type: E stock, I index, FT futures, FD fund, O option, CB convertible bond
        Returns:
        --------
        DataFrame: ts_code, trade_date ...
        """
if ticker and asset=='E':
ts_code = ticker+'.SH' if ticker[0]=='6' else ticker+'.SZ'
elif ticker:
ts_code = ticker
else:
ts_code = None
data = self._api.pro_bar(ts_code=ts_code, start_date=start_date, end_date=end_date,
freq=freq, adj=adj, ma=ma, asset=asset, adjfactor=True)
if data is None:
return pd.DataFrame()
if not data.empty:
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def stock_daily_basic_get(self, trade_date=None, ticker=None, start_date=None, end_date=None,
fields=None):
"""股票每日行情指标
Parameters:
----------
trade_date: str
交易日期
ticker: str
股票代码 与交易日期必须至少一个参数非空。
start_date: str
开始日期
end_date: str
结束日期
fields: str
返回字段 ts_code股票代码 trade_date交易日期 close当日收盘价 turnover_rate换手率(%)
turnover_rate_f换手率(自由流通股) volume_ratio量比 pe市盈率 pe_ttm市盈率(TTM)
pb市净率 ps市销率 ps_ttm市销率(TTM) dv_ratio股息率 dv_ttm股息率(TTM) total_share总股本(万股)
float_share流通股本(万) free_share自由流通股本(万) total_mv总市值(万) circ_mv流通市值(万)
Returns:
-------
DataFrame: ts_code, trade_date ...
"""
if ticker:
ticker = ticker+'.SH' if ticker[0]=='6' else ticker+'.SZ'
data = self.run_api('daily_basic', ts_code=ticker, trade_date=trade_date,
start_date=start_date, end_date=end_date, fields=fields)
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def stock_daily_price_get(self, ticker=None, trade_date=None, start_date=None, end_date=None,
fields=None):
"""
A股日行情数据
Parameters:
-----------
trade_date: str
交易日期
ticker: str
股票代码 与交易日期必须至少一个参数非空。
start_date: str
开始日期
end_date: str
结束日期
fields: str
返回字段 ts_code股票代码 trade_date交易日期 close当日收盘价 open开盘价
high最高价 low最低价 pre_close前收盘价 change涨跌 pct_chg涨跌幅(未复权)
vol成交量(手) amount成交额(千元)
Returns:
-------
DataFrame: ts_code, trade_date ...
"""
if ticker:
ticker = ticker+'.SH' if ticker[0]=='6' else ticker+'.SZ'
data = self.run_api('daily', ts_code=ticker, trade_date=trade_date,
start_date=start_date, end_date=end_date, fields=fields)
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def stock_monthly_price_get(self, ticker='', trade_date='', start_date='', end_date='',
fields=None):
"""
A股月行情数据
Parameters:
-----------
trade_date: str
交易日期
ticker: str
股票代码 与交易日期必须至少一个参数非空。
start_date: str
开始日期
end_date: str
结束日期
fields: str
返回字段 ts_code股票代码 trade_date交易日期 close当日收盘价 open开盘价
high最高价 low最低价 pre_close前收盘价 change涨跌 pct_chg涨跌幅(未复权)
vol成交量(手) amount成交额(千元)
"""
if ticker:
ticker = ticker+'.SH' if ticker[0]=='6' else ticker+'.SZ'
data = self.run_api('monthly', ts_code=ticker, trade_date=trade_date,
start_date=start_date, end_date=end_date, fields=fields)
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def option_basic_get(self, exchange='SSE', fields=None):
"""
期权合约信息
:param exchange: str
交易所 SSE上交所 SZSE深交所
:param fields: str
ts_codeTS代码 name合约名称 per_unit合约单位 opt_code标准合约代码 opt_type合约类型 call_put 期权类型
exercise_price行权价 s_month结算价格 maturity_date到期日期 list_price挂牌基准价 list_date开始交易日期
delist_date最后交割日期 quote_unit报价单位
:return: DataFrame ts_code, name, opt_code, ...
"""
data = self.run_api('opt_basic', exchange=exchange)
if fields:
data = data[fields.strip().split(',')]
return data
def option_daily_price_get(self, trade_date=None, ticker=None, start_date=None, end_date=None,
fields=None, exchange=None):
"""
期权日行情数据(单次最大1000行)
:param trade_date: str
交易日期 YYYYMMDD
:param ticker: str
证券代码 深交所300ETF期权以9开头;上交所期权以1开头;
:param start_date: str
起始日期
:param end_date: str
结束日期
:param exchange: str
交易所 上交所SSE、中金所CFFEX、深交所SZSE、上期所SHFE、CZCE郑商所、DCE大商所
:param fields: str
字段名称 ts_code合约代码 trade_date交易日期 close当日收盘价 open开盘价
high最高价 low最低价 pre_close前收盘价 pre_settle昨结算价
settle结算价 vol成交量(手) amount成交金额(万元) oi持仓量(手)
:return: DataFrame ts_code, trade_date, ...
"""
if ticker and ticker.find('.')<0:
if ticker[0] == '1':
ticker = ticker+'.SH'
elif ticker[0] == '9':
ticker = ticker+'.SZ'
elif ticker[:2] == 'IO':
ticker = ticker + '.CFX'
data = self.run_api('opt_daily', exchange=exchange, ts_code=ticker,
trade_date=trade_date, start_date=start_date,
end_date=end_date)
data['vol'] *= 10000
if fields:
data = data[fields.strip().split(',')]
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def fund_basic_get(self, market=None, fields=None):
"""
公募基金数据基本信息
:param market: str
交易市场 E:场内 O场外
:param fields: str
返回字段 ts_code基金代码 name基金简称 management管理人 fund_type投资类型
found_date成立日期 due_date到期日期 list_date上市日期 issue_date发行日期
delist_date退市日期 issue_amount发行份额 invest_type投资风格 type基金类型
benchmark基准
"""
data = self.run_api('fund_basic', market=market)
if fields:
data = data[fields.strip().split(',')]
return data
def fund_portfolio_get(self, ts_code, start_period=None, end_period=None, fields=None):
"""
公募基金持仓信息
Parameters
----------
ts_code : str
基金代码(带后缀)
start_period: str
起始报告期
end_period: str
终止报告期
fields: str
返回字段 ts_code基金代码 ann_date公告日期 end_date截止日期
symbol股票代码 mkv持有股票市值(元) amount持有股票数量(股)
stk_mkt_ratio占股票市值比 stk_float_ratio占流通股本比
Returns
-------
DataFrame ts_code end_date ...
"""
data = self.run_api('fund_portfolio', ts_code=ts_code)
if fields:
data = data[fields.strip().split(',')]
if start_period:
data = data[data['end_date'] >= start_period]
if end_period:
data = data[data['end_date'] <= end_period]
return data
def fund_nav_get(self, ts_code=None, end_date=None, market=None, fields=None):
"""
公募基金净值数据
Parameters:
-----------
ts_code: str
基金代码(带后缀)
end_date: str
净值日期YYYYMMDD
market: str
交易市场 E:场内 O场外
fields: str
ts_code基金代码 end_date截止日期 unit_nav单位净值 accum_div累计分红
net_asset资产净值 total_netasset合计资产净值 adj_nav复权单位净值
returns
-------
DataFrame: ts_code end_date ...
"""
data = self.run_api('fund_nav', ts_code = ts_code, end_date = end_date,
market = market)
if fields is not None:
data = data[fields.strip().split(',')]
data['end_date'] = pd.to_datetime(data['end_date'], format='%Y%m%d')
return data
def fund_daily_price_get(self, ts_code=None, trade_date=None, start_date=None, end_date=None,
fields=None):
"""
基金日行情数据(每次最多返回800行数据)
Parameters:
-----------
ts_code: str
基金代码(带后缀)
start_date: str
开始日期YYYYMMDD
end_date: str
结束日期
trade_date: str
交易日期
fields: str
详见:https://tushare.pro/document/2?doc_id=127
returns:
-------
DataFrame ts_code, trade_date....
"""
data = self.run_api('fund_daily', ts_code = ts_code,
start_date = start_date,
end_date = end_date,
trade_date = trade_date
)
if fields is not None:
data = data[fields.strip().split(',')]
data['trade_date'] = pd.to_datetime(data['trade_date'], format='%Y%m%d')
return data
def fund_manager_get(self, ts_code, fields=None):
"""
基金经理任职信息
Parameters:
-----------
ts_code: str
基金代码(带后缀)
fields: str
返回字段,以逗号分隔。
具体详见:https://tushare.pro/document/2?doc_id=208
"""
data = self.run_api('fund_manager', ts_code = ts_code)
if fields:
data = data[fields.strip().split(',')]
return data
def income_sheet(self, ticker=None, start_period=None, end_period=None, period=None,
report_type=None, fields=None):
"""
A股利润表
Parameters:
-----------
ticker: str
股票代码
start_period: str
起始报告期
end_period: str
结束报告期
period: str
报告期
report_type: str
报告类型
1合并报表 2单季合并 3调整单季合并表 4调整合并报表 5调整前合并报表
6母公司报表 7母公司单季表 8母公司调整单季表 9母公司调整表 10母公司调整前报表
11调整前合并报表 12母公司调整前报表
fields: str
返回字段,字段太多,参照:https://tushare.pro/document/2?doc_id=33
"""
if ticker:
ticker = ticker + '.SH' if ticker[0] == '6' else ticker + '.SZ'
if start_period is not None and end_period is not None:
periods = pd.date_range(start_period, end_period, freq='1Q').strftime("%Y%m%d")
else:
periods = [period]
df = []
for p in periods:
data = self.run_api('vip_income', ts_code=ticker, period=p, report_type=report_type,
fields=fields)
            df.append(data)
data = | pd.concat(df) | pandas.concat |
def ConvMAT2CSV(rootDir, codeDir):
"""
Written by <NAME> and <NAME> to work with macOS/Unix-based systems
Purpose: Extract data from .mat files and format into DataFrames
Export as csv file
Inputs: PythonData.mat files, animalNotes_baselines.mat file
Outputs: .csv files
Last Revised: April 2nd, 2019
"""
from scipy.io import loadmat
import numpy as np
import pandas as pd
import sys
import os
sys.path.append(codeDir)
from PreProcData import ResampFiltData
from GraphData import GraphData
# Load the baseline file
baseFileStr = ("baselineInfo.mat")
baseData = loadmat(rootDir + baseFileStr)
# Build list of keys and values for the baseline data
baseVals = baseData['animalNotes_baselines'][0,0]
baseKeys = baseData['animalNotes_baselines'][0,0].dtype.descr
baseResultsArray = | pd.DataFrame() | pandas.DataFrame |
"""Yahoo Finance view"""
__docformat__ = "numpy"
import os
import pandas as pd
from matplotlib import pyplot as plt
from tabulate import tabulate
from gamestonk_terminal.etf import yfinance_model
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.helper_funcs import plot_autoscale, export_data
from gamestonk_terminal.rich_config import console
def display_etf_weightings(
name: str,
raw: bool = False,
min_pct_to_display: float = 5,
export: str = "",
):
"""Display sector weightings allocation of ETF. [Source: Yahoo Finance]
Parameters
----------
name: str
ETF name
raw: bool
Display sector weighting allocation
min_pct_to_display: float
Minimum percentage to display sector
export: str
Type of format to export data
"""
sectors = yfinance_model.get_etf_sector_weightings(name)
if not sectors:
console.print("No data was found for that ETF\n")
return
holdings = | pd.DataFrame(sectors, index=[0]) | pandas.DataFrame |
import datetime as dt
import pandas as pd
from .. import AShareDataReader, DateUtils, DBInterface, utils
from ..config import get_db_interface
class IndustryComparison(object):
def __init__(self, index: str, industry_provider: str, industry_level: int, db_interface: DBInterface = None):
if not db_interface:
db_interface = get_db_interface()
self.data_reader = AShareDataReader(db_interface)
self.industry_info = self.data_reader.industry(industry_provider, industry_level)
self.index = index
def holding_comparison(self, holding: pd.Series):
holding_ratio = self._holding_to_ratio(holding)
return self.industry_ratio_comparison(holding_ratio)
def industry_ratio_comparison(self, holding_ratio: pd.Series):
date = holding_ratio.index.get_level_values('DateTime').unique()[0]
industry_info = self.industry_info.get_data(dates=date).stack()
industry_info.name = 'industry'
index_comp = self.data_reader.index_constitute.get_data(index_ticker=self.index, date=date)
holding_industry = self._industry_ratio(holding_ratio, industry_info) * 100
index_industry = self._industry_ratio(index_comp, industry_info)
diff_df = pd.concat([holding_industry, index_industry], axis=1, sort=True).fillna(0)
return diff_df.iloc[:, 0] - diff_df.iloc[:, 1]
def _holding_to_ratio(self, holding: pd.Series):
date = holding.index.get_level_values('DateTime').unique()[0]
price_info = self.data_reader.stock_close.get_data(dates=[date]).stack()
price_info.name = 'close'
tmp = holding.join(price_info, how='inner')
cap = tmp['quantity'] * tmp['close']
ratio = cap / cap.sum()
ratio.name = 'ratio'
return ratio
@staticmethod
def _industry_ratio(ratio: pd.Series, industry_info: pd.Series):
tmp = | pd.concat([ratio, industry_info], join='inner', axis=1) | pandas.concat |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
#Code ends here
data=pd.read_csv(path)
#Plotting histogram of Rating
data['Rating'].plot(kind='hist')
plt.show()
#Subsetting the dataframe based on `Rating` column
data=data[data['Rating']<=5]
#Plotting histogram of Rating
data['Rating'].plot(kind='hist')
# --------------
# code starts here
# code ends here
total_null=data.isnull().sum()
total_null
k=[]
for i in range (0,len(total_null)):
s=(total_null[i]/len(data))*100
k.append(s)
k
percent_null=pd.Series(k,total_null.index)
percent_null
missing_data=pd.DataFrame({'Total':total_null,'Percent':percent_null})
missing_data
data=data.dropna()
total_null_1=data.isnull().sum()
total_null_1
r=[]
for i in range (0,len(total_null_1)):
t=(total_null_1[i]/len(data))*100
r.append(t)
r
percent_null_1=pd.Series(r,total_null_1.index)
percent_null_1
missing_data_1= | pd.DataFrame({'Total':total_null_1,'Percent':percent_null_1}) | pandas.DataFrame |
import os
import re
import warnings
import matplotlib.pyplot as plt
from numpy import array, isnan
import pandas as pd
import pyflux as pf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import kpss
warnings.simplefilter("ignore")
from datetime import datetime
def adf_test(timeseries):
# print('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used',
'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' % key] = value
# print(dfoutput)
def kpss_test(timeseries):
# print('Results of KPSS Test:')
kpsstest = kpss(timeseries, regression='c')
kpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic', 'p-value', 'Lags Used'])
for key, value in kpsstest[3].items():
kpss_output['Critical Value (%s)' % key] = value
# print(kpss_output)
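# Illustrative sketch only (defined but never called in the original script):
# one way the adf_test / kpss_test helpers above could be driven for a single
# series. The synthetic daily index and the series name 'T1' are assumptions
# made for the example.
def _example_stationarity_check():
    rng = pd.date_range('2020-01-01', periods=100, freq='D')
    series = pd.Series(range(100), index=rng, name='T1')
    adf_test(series)   # ADF null hypothesis: the series has a unit root
    kpss_test(series)  # KPSS null hypothesis: the series is stationary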
machine_list = ["ibmq_16_melbourne.csv","ibmq_ourense.csv","ibmq_vigo.csv"]
for machines in machine_list:
date_analysis = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
machine_name=""
path = ".\\Log\\"
dirs = os.listdir(path)
| pd.set_option('display.max_columns', None) | pandas.set_option |
"""
Base and utility classes for pandas objects.
"""
import textwrap
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas.compat as compat
from pandas.compat import PYPY, OrderedDict, builtins, map, range
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.accessor import DirNamesMixin
import pandas.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on a accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.sum, np.sum),
(builtins.max, np.max),
(builtins.min, np.min),
))
_cython_table = OrderedDict((
(builtins.sum, 'sum'),
(builtins.max, 'max'),
(builtins.min, 'min'),
(np.all, 'all'),
(np.any, 'any'),
(np.sum, 'sum'),
(np.nansum, 'sum'),
(np.mean, 'mean'),
(np.nanmean, 'mean'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.std, 'std'),
(np.nanstd, 'std'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.max, 'max'),
(np.nanmax, 'max'),
(np.min, 'min'),
(np.nanmin, 'min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumsum, 'cumsum'),
(np.nancumsum, 'cumsum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describe the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif (isinstance(obj, ABCDataFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = | compat.OrderedDict() | pandas.compat.OrderedDict |
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import pandas as pd
import numpy as np
pio.templates.default = "simple_white"
EXPECTED_VALUE = 10
VARIANCE = 1
NUM_OF_SAMPLES = 1000
SAMPLES_DIFF = 10
DIFF_COL = 'Difference between actual and estimated variance'
NUM_OF_SAMPLES_COL = 'Sample size'
SAMPLE_VAL_COL = 'Sample value'
PDF_COL = 'Probability density function value'
TITLE = 'Difference between actual and estimated variance by num of samples'
PDF_TITLE = 'Sample value to its probability density function value'
MV_MEAN = np.array([0, 0, 4, 0])
MV_COV = np.array([
[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]
])
MV_NUM_OF_SAMPLES = 1000
MV_HEATMAP_TITLE = "Log Likelihood Heatmap based on f1, f3 values"
F1_COL = 'f1'
F3_COL = 'f3'
LOG_LIKELIHOOD_COL = 'Log Likelihood'
FORMAT = lambda x: format(x, '.3f')
def test_univariate_gaussian():
# Question 1
uni_variate_gaussian = UnivariateGaussian()
samples = np.random.normal(EXPECTED_VALUE, VARIANCE, NUM_OF_SAMPLES)
uni_variate_gaussian = uni_variate_gaussian.fit(samples)
print(f"({FORMAT(uni_variate_gaussian.mu_)}, {FORMAT(uni_variate_gaussian.var_)})")
# Question 2
num_of_samples = SAMPLES_DIFF
results = []
for i in range(10, 1010, 10):
partial_samples = samples[:i]
uni_variate_gaussian.fit(partial_samples)
results.append((abs(uni_variate_gaussian.mu_ - EXPECTED_VALUE),
partial_samples.size))
num_of_samples += SAMPLES_DIFF
df = pd.DataFrame(results, columns=[DIFF_COL, NUM_OF_SAMPLES_COL])
px.bar(df, y=DIFF_COL, x=NUM_OF_SAMPLES_COL, title=TITLE).show()
# Question 3
pdf = uni_variate_gaussian.pdf(samples)
df = pd.DataFrame(zip(samples, pdf), columns=[SAMPLE_VAL_COL, PDF_COL])
px.scatter(df, x=SAMPLE_VAL_COL, y=PDF_COL, title=PDF_TITLE).show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
samples = np.random.multivariate_normal(MV_MEAN, MV_COV, MV_NUM_OF_SAMPLES)
mv_gaussian = MultivariateGaussian()
mv_gaussian = mv_gaussian.fit(samples)
print(mv_gaussian.mu_)
print(mv_gaussian.cov_)
# Question 5 - Likelihood evaluation
results = []
vals_range = np.linspace(-10, 10, 200)
for f1 in vals_range:
for f3 in vals_range:
mu = np.array([f1, 0, f3, 0])
log_likelihood = MultivariateGaussian.log_likelihood(mu, MV_COV,
samples)
results.append((f1, f3, log_likelihood))
df = | pd.DataFrame(results, columns=[F1_COL, F3_COL, LOG_LIKELIHOOD_COL]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 26 09:40:28 2022
@author: Featherine
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
df = pd.read_csv('features - Final.csv')
df = df.fillna(0)
# df = df[0:48]
df['DateTime'] = pd.to_datetime(df['DateTime'], format='%Y-%m-%d %H:%M:%S')
fig, axs = plt.subplots(4, 2, figsize=(15,12))
# A random day
df_day = df[0:48]
axs[0, 0].plot('DateTime', '1006', data=df_day)
axs[0, 0].set_xlim(df_day['DateTime'].min()-pd.Timedelta(1,'h'),
df_day['DateTime'].max()+pd.Timedelta(1,'h'))
axs[0, 0].xaxis.set_major_locator(md.HourLocator(interval = 1))
axs[0, 0].xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S'))
fig.autofmt_xdate()
axs[0, 0].set_title('Traffic in a Random Day')
axs[0, 0].set_xlabel('Time')
axs[0, 0].set_ylabel('Number of Cars')
axs[0, 1].plot('DateTime', 'Temp', data=df_day)
axs[0, 1].set_xlim(df_day['DateTime'].min()-pd.Timedelta(1,'h'),
df_day['DateTime'].max()+pd.Timedelta(1,'h'))
axs[0, 1].xaxis.set_major_locator(md.HourLocator(interval = 1))
axs[0, 1].xaxis.set_major_formatter(md.DateFormatter('%H:%M:%S'))
fig.autofmt_xdate()
axs[0, 1].set_title('Temperature in a Random Day')
axs[0, 1].set_ylabel('Temperature')
# Per over a year
axs[1, 0].plot('DateTime', '1006', data=df)
axs[1, 0].set_xlim(df['DateTime'].min()- | pd.Timedelta(1,'h') | pandas.Timedelta |
import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
# GH 18951: tz-naive to tz-aware
idx = date_range('20170101', periods=4)
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_object(self):
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
assert casted.tolist() == exp_values
@pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
def test_astype_object_tz(self, tz):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz=tz)
expected_list = [Timestamp('2013-01-31', tz=tz),
Timestamp('2013-02-28', tz=tz),
Timestamp('2013-03-31', tz=tz),
Timestamp('2013-04-30', tz=tz)]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]', 'datetime64',
'datetime64[D]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = | DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) | pandas.DatetimeIndex |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators.ipynb (unless otherwise specified).
__all__ = ['getColName', 'getColByName', 'addKey', 'nullIfEqual', 'sumInts', 'age5', 'age18', 'age24', 'age64', 'age65',
'bahigher', 'carpool', 'drvalone', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hisp', 'hh25inc',
'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhs', 'hsdipl', 'lesshs', 'male', 'mhhi', 'drvalone',
'novhcl', 'nohhint', 'othercom', 'paa', 'p2more', 'pasi', 'pubtran', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav14', 'trav45', 'trav44', 'unempr', 'unempr', 'walked', 'createAcsIndicator']
# Cell
#@title Run This Cell: Misc Function Declarations
# These functions right here are used in the calculations below.
# Finds a column matchings a substring
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
# Pulls a column from one dataset into a new dataset.
# This is not a crosswalk. calls getColByName()
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
# Return 0 if two specified columns are equal.
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
# I'm thinking this doesnt need to be a function..
def sumInts(df): return df.sum(numeric_only=True)
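# Minimal sketch of how the helpers above are used; the tiny DataFrame and its
# ACS-style column names are made up for illustration only.
def _example_helper_usage():
    import pandas as pd
    df = pd.DataFrame({'B01001_001E_Total': [120, 80],
                       'B01001_003E_Total_Male_Under_5_years': [6, 4],
                       'tract': ['000100', '000200']})
    fi = pd.DataFrame()
    fi = addKey(df, fi, 'B01001_001E')    # copies the matching column into fi
    print(getColName(df, 'B01001_003E'))  # prints the full matching column name
    return fi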
# Cell
#@title Run This Cell: Create age5
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age5( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_027E_Total_Female_Under_5_years',
'B01001_003E_Total_Male_Under_5_years',
'B01001_001E_Total' , 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / df['B01001_001E_Total'] * 100
return fi
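# Hedged usage sketch for the indicator functions in this module: each one takes
# the raw ACS pull plus any extra columns to carry through. The two sample rows
# below are invented and only need to match the column names age5() expects.
def _example_age5_usage():
    import pandas as pd
    df = pd.DataFrame({'B01001_001E_Total': [1000, 500],
                       'B01001_003E_Total_Male_Under_5_years': [30, 10],
                       'B01001_027E_Total_Female_Under_5_years': [25, 15],
                       'tract': ['000100', '000200']})
    out = age5(df, columnsToInclude=[])
    # out['final'] now holds the percent of population under age 5 per tract.
    return out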
# Cell
#@title Run This Cell: age18
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import numpy
import pandas as pd
import glob
def age18( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_001E_Total',
'B01001_004E_Total_Male_5_to_9_years',
'B01001_005E_Total_Male_10_to_14_years' ,
'B01001_006E_Total_Male_15_to_17_years',
'B01001_028E_Total_Female_5_to_9_years',
'B01001_029E_Total_Female_10_to_14_years' ,
'B01001_030E_Total_Female_15_to_17_years']
columns = df.filter(regex='001E|004E|005E|006E|028E|029E|030E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='004E|005E|006E|028E|029E|030E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: Create age24
#File: age24.py
#Author: <NAME>
#Date: 9/8/21
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age24( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_007E_Total_Male_18_and_19_years',
'B01001_008E_Total_Male_20_years',
'B01001_009E_Total_Male_21_years' ,
'B01001_010E_Total_Male_22_to_24_years' ,
'B01001_031E_Total_Female_18_and_19_years' ,
'B01001_032E_Total_Female_20_years' ,
'B01001_033E_Total_Female_21_years' ,
'B01001_034E_Total_Female_22_to_24_years',
'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age64
import pandas as pd
import glob
def age64( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: age65
import pandas as pd
import glob
def age65( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: bahigher
import pandas as pd
import glob
def bahigher( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='005E|006E').sum(axis=1)
) / df['B06009_001E'] * 100
return fi
# Cell
#@title Run This Cell: - carpool
import pandas as pd
import glob
def carpool( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|017E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_017E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: - drvalone
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -elheat
import pandas as pd
import glob
def elheat( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='B25040_004E|B25040_001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B25040_004E').sum(axis=1)
) / ( df.filter(regex='B25040_001E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -empl
import pandas as pd
import glob
def empl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -fam
import pandas as pd
import glob
def fam( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -female
import pandas as pd
import glob
def female( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['female'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -femhhs
import pandas as pd
import glob
def femhhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['femhhs'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -heatgas
import pandas as pd
import glob
def heatgas( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: hisp
import pandas as pd
import glob
def hisp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_012E_Total_Hispanic_or_Latino']
columns = df.filter(regex='001E|012E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
fi['final'] = ( df.filter(regex='012E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: hh25inc
import pandas as pd
import glob
def hh25inc( df, columnsToInclude ):
df.columns = df.columns.str.replace(r"[$]", "")
fi = pd.DataFrame()
columns = ['B19001_001E_Total',
"B19001_002E_Total_Less_than_10,000",
"B19001_003E_Total_10,000_to_14,999",
"B19001_004E_Total_15,000_to_19,999",
"B19001_005E_Total_20,000_to_24,999"]
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey col: ', col, df.columns)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E|003E|004E|005E').sum(axis=1)
) / df['B19001_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -hh40inc
import pandas as pd
import glob
def hh40inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh60inc
import pandas as pd
import glob
def hh60inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh75inc
import pandas as pd
import glob
def hh75inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhchpov
import pandas as pd
import glob
def hhchpov( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhm75
import pandas as pd
import glob
def hhm75( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhs
import pandas as pd
import glob
def hhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hsdipl
import pandas as pd
import glob
def hsdipl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -lesshs
import pandas as pd
import glob
def lesshs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -male
import pandas as pd
import glob
def male( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
# @title Run This Cell : Create MHHI
#File: mhhi.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2016 INFLATION-ADJUSTED DOLLARS)
# Universe: Households
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Sustainability - Percent of Population that Walks to Work Indicator
#input:
#output:
import pandas as pd
import glob
def mhhi( df, columnsToInclude = [] ):
info = pd.DataFrame(
[
['B19001_002E', 0, 10000],
['B19001_003E', 10000, 4999 ],
['B19001_004E', 15000, 4999 ],
['B19001_005E', 20000, 4999 ],
['B19001_006E', 25000, 4999 ],
['B19001_007E', 30000, 4999],
['B19001_008E', 35000, 4999 ],
['B19001_009E', 40000, 4999 ],
['B19001_010E', 45000, 4999 ],
['B19001_011E', 50000, 9999 ],
['B19001_012E', 60000, 14999],
['B19001_013E', 75000, 24999 ],
['B19001_014E', 100000, 24999 ],
['B19001_015E', 125000, 24999 ],
['B19001_016E', 150000, 49000 ],
['B19001_017E', 200000, 1000000000000000000000000 ],
],
columns=['variable', 'lower', 'range']
)
# Final Dataframe
data_table = pd.DataFrame()
for index, row in info.iterrows():
data_table = addKey(df, data_table, row['variable'])
# Accumulate totals accross the columns.
# Midpoint: Divide column index 16 (the last column) of the cumulative totals
temp_table = data_table.cumsum(axis=1)
temp_table['midpoint'] = (temp_table.iloc[ : , -1 :] /2) # V3
temp_table['midpoint_index'] = False
temp_table['midpoint_index_value'] = False # Z3
temp_table['midpoint_index_lower'] = False # W3
temp_table['midpoint_index_range'] = False # X3
temp_table['midpoint_index_minus_one_cumulative_sum'] = False #Y3
# step 3 - csa_agg3: get the midpoint index by "when midpoint > agg[1] and midpoint <= agg[2] then 2"
# Get CSA Midpoint Index using the breakpoints in our info table.
for index, row in temp_table.iterrows():
# Get the index of the first column where our midpoint is greater than the columns value.
midpoint = row['midpoint']
midpoint_index = 0
# For each column (except the 6 columns we just created)
        # Handle tracts whose midpoint is less than the value in the first income column ('B19001_002E_Total_Less_than_$10,000')
if( midpoint < int(row[0]) or row[-6] == False ):
temp_table.loc[ index, 'midpoint_index' ] = 0
else:
for column in row.iloc[:-6]:
# set midpoint index to the column with the highest value possible that is under midpoint
if( midpoint >= int(column) ):
if midpoint==False: print (str(column) + ' - ' + str(midpoint))
temp_table.loc[ index, 'midpoint_index' ] = midpoint_index +1
midpoint_index += 1
# temp_table = temp_table.drop('Unassigned--Jail')
for index, row in temp_table.iterrows():
temp_table.loc[ index, 'midpoint_index_value' ] = data_table.loc[ index, data_table.columns[row['midpoint_index']] ]
temp_table.loc[ index, 'midpoint_index_lower' ] = info.loc[ row['midpoint_index'] ]['lower']
temp_table.loc[ index, 'midpoint_index_range' ] = info.loc[ row['midpoint_index'] ]['range']
temp_table.loc[ index, 'midpoint_index_minus_one_cumulative_sum'] = row[ row['midpoint_index']-1 ]
# This is our denominator, which cant be negative.
for index, row in temp_table.iterrows():
if row['midpoint_index_value']==False:
temp_table.at[index, 'midpoint_index_value']=1;
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
    # Calculation = midpoint_lower + midpoint_range * ((midpoint - midpoint_upto_agg) / nullif(midpoint_total, 0))
# Calculation = W3+X3*((V3-Y3)/Z3)
# v3 -> 1 - midpoint of households == sum / 2
# w3 -> 2 - lower limit of the income range containing the midpoint of the housing total == row[lower]
# x3 -> width of the interval containing the medium == row[range]
# z3 -> number of hhs within the interval containing the median == row[total]
    # y3 -> 4 - cumulative frequency up to, but NOT including, the median interval
#~~~~~~~~~~~~~~~
def finalCalc(x):
return ( x['midpoint_index_lower']+ x['midpoint_index_range']*(
( x['midpoint']-x['midpoint_index_minus_one_cumulative_sum'])/ x['midpoint_index_value'] )
)
temp_table['final'] = temp_table.apply(lambda x: finalCalc(x), axis=1)
temp_table[columnsToInclude] = df[columnsToInclude]
return temp_table
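# Worked sketch of the interpolation mhhi() performs above (the grouped-data
# median formula: median = L + w * ((N/2 - F) / f)). The household counts below
# are invented purely to show the arithmetic; they are not real ACS values.
def _example_mhhi_interpolation():
    lower, width = 50000, 9999   # bracket holding the midpoint (e.g. $50,000-$59,999)
    total_hhs = 400              # N: total households in the tract
    cum_below = 180              # F: cumulative households below the bracket
    in_bracket = 60              # f: households inside the bracket
    midpoint = total_hhs / 2     # 200.0
    median = lower + width * ((midpoint - cum_below) / in_bracket)
    return median                # 50000 + 9999 * (20/60) = 53333.0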
# Cell
#@ title Run This Cell: -nilf
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: novhcl
import pandas as pd
import glob
def novhcl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B08201_002E_Total_No_vehicle_available','B08201_001E_Total']
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E').sum(axis=1)
) / df['B08201_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: nohhint
import pandas as pd
import glob
def nohhint( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B28011_001E_Total',
'B28011_002E_Total_With_an_Internet_subscription',
'B28011_003E_Total_With_an_Internet_subscription_Dial-up_alone',
'B28011_004E_Total_With_an_Internet_subscription_Broadband_such_as_cable,_fiber_optic,_or_DSL',
'B28011_005E_Total_With_an_Internet_subscription_Satellite_Internet_service',
'B28011_006E_Total_With_an_Internet_subscription_Other_service',
'B28011_007E_Total_Internet_access_without_a_subscription',
'B28011_008E_Total_No_Internet_access']
columns = df.filter(regex='008E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
# Calculate
fi['nohhint'] = ( df.filter(regex='008E').sum(axis=1)
) / df['B28011_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: -othercom
import pandas as pd
import glob
def othercom( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['othercom'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: paa
import pandas as pd
import glob
def paa( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total:',
'B03002_004E_Total_Not_Hispanic_or_Latino_Black_or_African_American_alone']
columns = df.filter(regex='001E|004E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['paa'] = ( df.filter(regex='004E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: -p2more
import pandas as pd
import glob
def p2more( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pasi ***
import pandas as pd
import glob
def pasi( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='006E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pubtran
import pandas as pd
import glob
def pubtran( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='025E|001E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['pubtran'] = ( df.filter(regex='025E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: pwhite
import pandas as pd
import glob
def pwhite( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_003E_Total_Not_Hispanic_or_Latino_White_alone']
columns = df.filter(regex='001E|003E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
# Calculate
fi['pwhite'] = ( df.filter(regex='003E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: -racdiv ***
# Cell
#@title Run This Cell: -sclemp
import pandas as pd
import glob
def sclemp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|004E|005E|006E|009E|013E|018E|019E|020E|023E|027E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['sclemp'] = ( df.filter(regex='004E|005E|006E|009E|013E|018E|019E|020E|023E|027E').sum(axis=1)
) / ( df.filter(regex='001E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -tpop
import pandas as pd
import glob
def tpop( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['tpop'] = ( df.filter(regex='001E').sum(axis=1)
)
return fi
# Cell
#@title Run This Cell: trav14
import pandas as pd
import glob
def trav14( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='002E|003E|004E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
    fi['trav14'] = ( df.filter(regex='002E|003E|004E').sum(axis=1)
) / df['B08303_001E'] * 100
return fi
# Cell
#@title Run This Cell: trav29
import pandas as pd
import glob
def trav29( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|007E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
    fi['trav29'] = ( df.filter(regex='005E|006E|007E').sum(axis=1)
) / df['B08303_001E'] * 100
return fi
# Cell
#@title Run This Cell: Create trav45
#File: trav45.py
#Author: <NAME>
#Date: 1/17/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B08303 - TRAVEL TIME TO WORK,
# (Universe: Workers 16 years and over who did not work at home)
# Table Creates: trav14, trav29, trav44, trav45
#purpose: Produce Sustainability - Percent of Employed Population with Travel Time to Work of 45 Minutes and Over Indicator
#input:
#output:
import pandas as pd
import glob
def trav45(df, columnsToInclude = [] ):
# Final Dataframe
fi = pd.DataFrame()
columns = ['B08303_011E','B08303_012E','B08303_013E','B08303_001E', 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
# Numerators
    numerators = pd.DataFrame()
# # Explore overfitting and underfitting
# # (https://www.tensorflow.org/alpha/tutorials/keras/overfit_and_underfit)
import altair as alt
import numpy as np
import pandas as pd
from tensorflow import keras
# ## Download the IMDB dataset
# !Multi-hot-encoding
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(
num_words=NUM_WORDS
)
def multi_hot_sequences(sequences, dimension):
# Create an all-zero matrix of shape (len(sequences), dimension)
results = np.zeros((len(sequences), dimension))
for i, word_indices in enumerate(sequences):
results[i, word_indices] = 1.0 # set specific indices of results[i] to 1s
return results
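# Tiny sanity check on a toy input: word-index lists become fixed-length 0/1 vectors,
# e.g. [1, 3] with dimension=5 encodes to [0., 1., 0., 1., 0.].
_demo = multi_hot_sequences([[1, 3], [0, 2, 4]], dimension=5)
assert _demo.shape == (2, 5)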
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
# -
df = pd.DataFrame({"label": train_data[0]})
#definition of add_dataset that creates the meta-dataset
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.stats import pearsonr
from sklearn.model_selection import train_test_split
from supervised.automl import AutoML
import os
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
rootdir = os.path.dirname(__file__)
results_dir = rootdir + '/results/'
dataset_dir = rootdir + '/datasets_list_final/'
datasets_to_add_dir = rootdir + '/datasets_list_toadd/'
algorithm_list = ['Linear', 'Random Forest', 'Decision Tree', 'Neural Network']
def encode_y(y):
le = LabelEncoder()
le.fit(y)
y_enc = le.transform(y)
return y_enc
def compute_max_corr(df):
y = encode_y(df[df.columns[-1]])
y = pd.Series(y)
corr = df[df.columns[:-1]].corrwith(y)
return np.max(np.absolute(corr))
def compute_max_corr_between_X_and_y(X, y):
y = encode_y(y)
y = pd.Series(y)
X = X.apply(pd.to_numeric, errors='ignore')
return np.max(np.absolute(X.apply(lambda x: x.corr(y) if is_numeric_dtype(x) else 0)))
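# Hedged usage sketch for the correlation meta-feature helpers above, on a toy frame;
# the column names here are invented, while in add_dataset() the real X/y come from OpenML.
def _max_corr_demo():
    toy = pd.DataFrame({'feat_a': [1, 2, 3, 4],
                        'feat_b': [4, 3, 2, 1],
                        'target': ['yes', 'yes', 'no', 'no']})
    # largest absolute Pearson correlation between any numeric feature and the encoded target
    return compute_max_corr_between_X_and_y(toy[['feat_a', 'feat_b']], toy['target'])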
def add_dataset(dataset, dataset_dataframe):
path = rootdir + "/ml_dataset.csv"
try:
df = pd.read_csv(path)
except:
df = pd.DataFrame()
df['did'] = 0
dataset_id = dataset.dataset_id
if dataset_id in df['did'].values:
print("Dataset %d already present in the dataset!" % dataset_id)
else:
# PERFORM AUTOML
X, y, _, _ = dataset.get_data(
target=dataset.default_target_attribute)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
automl = AutoML(algorithms=algorithm_list, eval_metric='f1', results_path=results_dir + str(dataset_id),
explain_level=1, top_models_to_improve=4, random_state=2, optuna_verbose=False)
automl.fit(X_train, y_train)
predictions = automl.predict(X_test)
# ADD DATASET
# Retrieve results from automl
results_col_list = ['metric_value', 'train_time', 'model_type']
results_col_new_names = ['F1', 'time', 'algo']
df_automl_results = pd.read_csv(results_dir + str(dataset_id) + '/leaderboard.csv')[results_col_list]
df_automl_results.columns = results_col_new_names
# Add information about dataset
interesting_columns = dataset_dataframe.columns[6:]
for column in interesting_columns:
df_automl_results[column] = dataset_dataframe.loc[dataset_id, column]
df_automl_results['TDP'] = 250
df_automl_results['country'] = 'Switzerland'
df_automl_results['max_corr'] = compute_max_corr_between_X_and_y(X, y)
df_automl_results['did'] = dataset_id
# Set algo as the last column
i = list(df_automl_results.columns)
pos = i.index('algo')
new_i = i[0:pos] + i[pos + 1:] + [i[pos]]
df_automl_results = df_automl_results[new_i]
# Append new dataset
        df = pd.concat([df, df_automl_results])
import pandas as pd
def cat_lump(x, n=5, prop=None, other_level="Other"):
"""
Lump together least common categories into an "Other" category
Parameters
----------
x : pd.Series
series to be modified
n : int
number of levels to preserve
prop : float
optional instead of n. Choose the minimum proportion for a level.
Must be between 0 and 1. Overrides n.
other_level : str
"other" category label
Returns
-------
y : pd.Series
modified series (with categorical type)
"""
counts = x.value_counts()
if prop:
assert 0 <= prop <= 1
min_count = int(prop * x.size)
if min_count > counts.min():
repl = counts.loc[counts < min_count].index
x = x.replace(repl, other_level)
elif len(counts) > n:
repl = counts.iloc[n:].index
x = x.replace(repl, other_level)
return x
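# Illustrative use of cat_lump on a toy series: the two most frequent levels are kept
# and the remaining levels are collapsed into "Other".
def _cat_lump_example():
    s = pd.Series(["red", "red", "blue", "blue", "green", "yellow"])
    return cat_lump(s, n=2)  # red, red, blue, blue, Other, Other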
def cat_count(x, sort=False, prop=False):
"""
Count entries in a factor
Parameters
----------
x : pd.Series
series to be counted
sort : boolean
If `True`, sort the result so that the most common values are displayed at the top.
prop : boolean
If `True`, compute the fraction of marginal table.
Returns
-------
y : pd.core.frame.DataFrame
A df with columns `f`, `n` and `p`, if prop is `True`.
"""
counts = x.value_counts(sort=sort)
df = pd.DataFrame({'f': counts.index, 'n': counts.values})
if(prop):
df['p'] = df['n']/sum(df['n'])
return df
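# Illustrative use of cat_count: tally the levels and, with prop=True, add their share.
def _cat_count_example():
    s = pd.Series(["a", "a", "b"])
    return cat_count(s, sort=True, prop=True)  # rows ("a", 2, 2/3) and ("b", 1, 1/3)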
def cat_anon(x, prefix = ""):
"""
Anonymise category levels
Parameters
----------
x : pd.Series
series to be modified
prefix : string
string prefix to insert in front of the numeric labels
Returns
-------
y : pd.Series
Anonymised pandas series
"""
x_array = pd.factorize(x)[0] + 1
digits = [len(str(x)) for x in x_array]
digits = max(digits)
x = [str(x).zfill(digits) for x in x_array]
    x = prefix + pd.Series(x)
    return x
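# Illustrative use of cat_anon: levels are replaced by zero-padded numeric labels
# carrying the given prefix, e.g. "alice"/"bob" become "id_1"/"id_2".
def _cat_anon_example():
    s = pd.Series(["alice", "bob", "alice"])
    return cat_anon(s, prefix="id_")  # id_1, id_2, id_1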
import act
import requests
import json
import glob
import pandas as pd
import datetime as dt
import numpy as np
import xarray as xr
import dask
import matplotlib.pyplot as plt
import textwrap
from matplotlib.dates import DateFormatter
from matplotlib.dates import HourLocator
def get_doi(site, dsname, c_start, c_end):
# Get DOI Information
doi_url = 'https://adc.arm.gov/citationservice/citation/inst-class?id=' + inst[ii] + '&citationType=apa'
doi_url += '&site=' + site
doi_url += '&dataLevel=' + dsname.split('.')[-1]
doi_url += '&startDate=' + c_start
doi_url += '&endDate=' + c_end
doi = requests.get(url=doi_url).json()['citation']
return doi
def get_metadata(ds):
metadata_url = 'https://adc.arm.gov/solr8/metadata/select?q=datastream%3A' + ds
r = requests.get(url=metadata_url)
response = r.json()['response']
response = response['docs'][0]
description = response['instrument_name_text']
return description
if __name__ == '__main__':
now = pd.Timestamp.now()
# Import Configuration File
from anx_conf import conf
site = conf['site']
inst = list(conf['instruments'].keys())
c_start = conf['start_date']
c_end = conf['end_date']
nrows = len(inst)
ncols = 3
fig = plt.figure(figsize=(10, 1.5 * nrows), constrained_layout=True)
gs = fig.add_gridspec(nrows, ncols)
for ii in range(len(inst)):
dsname = conf['instruments'][inst[ii]]['dsname']
ds = site + dsname
files = glob.glob('/data/archive/' + site + '/' + ds + '/' + ds + '*nc')
if len(files) == 0:
files = glob.glob('/data/archive/' + site + '/' + ds + '/' + ds + '*cdf')
files = sorted(files)
obj = act.io.armfiles.read_netcdf(files, parallel=True)
if 'dsname2' in conf['instruments'][inst[ii]]:
dsname2 = conf['instruments'][inst[ii]]['dsname2']
ds2 = site + dsname2
files2 = glob.glob('/data/archive/' + site + '/' + ds2 + '/' + ds2 + '*nc')
if len(files2) == 0:
files2 = glob.glob('/data/archive/' + site + '/' + ds2 + '/' + ds2 + '*cdf')
files2 = sorted(files2)
obj2 = act.io.armfiles.read_netcdf(files2, parallel=True, concat_dim=['time'], combine='nested')
obj = xr.merge([obj, obj2])
obj2.close()
else:
dsname2 = None
if 'override_delta' in conf['instruments'][inst[ii]]:
t_delta = conf['instruments'][inst[ii]]['override_delta']
else:
t_delta = act.utils.determine_time_delta(obj['time'].values, default=60.)/60
print(ds, t_delta, int(1440/t_delta), 1440/t_delta)
start = pd.to_datetime(obj['time'].values[0]).floor('D')
        end = pd.to_datetime(obj['time'].values[-1])
'''
Functions for calculating soiling metrics from photovoltaic system data.
The soiling module is currently experimental. The API, results,
and default behaviors may change in future releases (including MINOR
and PATCH releases) as the code matures.
'''
import warnings
import pandas as pd
import numpy as np
from scipy.stats.mstats import theilslopes
warnings.warn(
'The soiling module is currently experimental. The API, results, '
'and default behaviors may change in future releases (including MINOR '
'and PATCH releases) as the code matures.'
)
# Custom exception
class NoValidIntervalError(Exception):
'''raised when no valid rows appear in the result dataframe'''
pass
class SRRAnalysis():
'''
Class for running the stochastic rate and recovery (SRR) photovoltaic
soiling loss analysis presented in Deceglie et al. JPV 8(2) p547 2018
Parameters
----------
energy_normalized_daily : pandas.Series
Daily performance metric (i.e. performance index, yield, etc.)
Alternatively, the soiling ratio output of a soiling sensor (e.g. the
photocurrent ratio between matched dirty and clean PV reference cells).
In either case, data should be insolation-weighted daily aggregates.
insolation_daily : pandas.Series
Daily plane-of-array insolation corresponding to
`energy_normalized_daily`. Arbitrary units.
precipitation_daily : pandas.Series, default None
Daily total precipitation. (Ignored if ``clean_criterion='shift'`` in
subsequent calculations.)
'''
def __init__(self, energy_normalized_daily, insolation_daily,
precipitation_daily=None):
self.pm = energy_normalized_daily # daily performance metric
self.insolation_daily = insolation_daily
self.precipitation_daily = precipitation_daily # daily precipitation
self.random_profiles = [] # random soiling profiles in _calc_monte
# insolation-weighted soiling ratios in _calc_monte:
self.monte_losses = []
if pd.infer_freq(self.pm.index) != 'D':
raise ValueError('Daily performance metric series must have '
'daily frequency')
if pd.infer_freq(self.insolation_daily.index) != 'D':
raise ValueError('Daily insolation series must have '
'daily frequency')
if self.precipitation_daily is not None:
if pd.infer_freq(self.precipitation_daily.index) != 'D':
raise ValueError('Precipitation series must have '
'daily frequency')
def _calc_daily_df(self, day_scale=13, clean_threshold='infer',
recenter=True, clean_criterion='shift', precip_threshold=0.01,
outlier_factor=1.5):
'''
Calculates self.daily_df, a pandas dataframe prepared for SRR analysis,
and self.renorm_factor, the renormalization factor for the daily
performance
Parameters
----------
day_scale : int, default 13
The number of days to use in rolling median for cleaning detection.
An odd value is recommended.
clean_threshold : float or 'infer', default 'infer'
If float: the fractional positive shift in rolling median for
cleaning detection.
If 'infer': automatically use outliers in the shift as the
threshold
recenter : bool, default True
Whether to recenter (renormalize) the daily performance to the
median of the first year
clean_criterion : str, {'shift', 'precip_and_shift', 'precip_or_shift', 'precip'} \
default 'shift'
The method of partitioning the dataset into soiling intervals.
* 'precip_and_shift' - rolling median shifts must coincide
with precipitation to be a valid cleaning event.
* 'precip_or_shift' - rolling median shifts and precipitation
events are each sufficient on their own to be a cleaning event.
* 'shift', only rolling median shifts are treated as cleaning events.
* 'precip', only precipitation events are treated as cleaning events.
precip_threshold : float, default 0.01
The daily precipitation threshold for defining precipitation
cleaning events.
Units must be consistent with ``self.precipitation_daily``.
outlier_factor : float, default 1.5
The factor used in the Tukey fence definition of outliers for flagging positive shifts
in the rolling median used for cleaning detection. A smaller value will cause more and
smaller shifts to be classified as cleaning events.
'''
if (day_scale % 2 == 0) and ('shift' in clean_criterion):
warnings.warn('An even value of day_scale was passed. An odd value is '
'recommended, otherwise, consecutive days may be erroneously '
'flagged as cleaning events. '
'See https://github.com/NREL/rdtools/issues/189')
df = self.pm.to_frame()
df.columns = ['pi']
df_insol = self.insolation_daily.to_frame()
df_insol.columns = ['insol']
df = df.join(df_insol)
precip = self.precipitation_daily
if precip is not None:
df_precip = precip.to_frame()
df_precip.columns = ['precip']
df = df.join(df_precip)
else:
df['precip'] = 0
# find first and last valid data point
start = df[~df.pi.isnull()].index[0]
end = df[~df.pi.isnull()].index[-1]
df = df[start:end]
# create a day count column
df['day'] = range(len(df))
# Recenter to median of first year, as in YoY degradation
if recenter:
oneyear = start + pd.Timedelta('364d')
renorm = df.loc[start:oneyear, 'pi'].median()
else:
renorm = 1
df['pi_norm'] = df['pi'] / renorm
# Find the beginning and ends of outages longer than dayscale
bfill = df['pi_norm'].fillna(method='bfill', limit=day_scale)
ffill = df['pi_norm'].fillna(method='ffill', limit=day_scale)
out_start = (~df['pi_norm'].isnull() & bfill.shift(-1).isnull())
out_end = (~df['pi_norm'].isnull() & ffill.shift(1).isnull())
# clean up the first and last elements
out_start.iloc[-1] = False
out_end.iloc[0] = False
# Make a forward filled copy, just for use in
# step, slope change detection
df_ffill = df.fillna(method='ffill', limit=day_scale).copy()
# Calculate rolling median
df['pi_roll_med'] = \
df_ffill.pi_norm.rolling(day_scale, center=True).median()
# Detect steps in rolling median
df['delta'] = df.pi_roll_med.diff()
if clean_threshold == 'infer':
deltas = abs(df.delta)
clean_threshold = deltas.quantile(0.75) + \
outlier_factor * (deltas.quantile(0.75) - deltas.quantile(0.25))
df['clean_event_detected'] = (df.delta > clean_threshold)
precip_event = (df['precip'] > precip_threshold)
if clean_criterion == 'precip_and_shift':
# Detect which cleaning events are associated with rain
# within a 3 day window
precip_event = precip_event.rolling(
3, center=True, min_periods=1).apply(any).astype(bool)
df['clean_event'] = (df['clean_event_detected'] & precip_event)
elif clean_criterion == 'precip_or_shift':
df['clean_event'] = (df['clean_event_detected'] | precip_event)
elif clean_criterion == 'precip':
df['clean_event'] = precip_event
elif clean_criterion == 'shift':
df['clean_event'] = df['clean_event_detected']
else:
raise ValueError('clean_criterion must be one of '
'{"precip_and_shift", "precip_or_shift", '
'"precip", "shift"}')
df['clean_event'] = df.clean_event | out_start | out_end
df = df.fillna(0)
# Give an index to each soiling interval/run
df['run'] = df.clean_event.cumsum()
df.index.name = 'date' # this gets used by name
self.renorm_factor = renorm
self.daily_df = df
def _calc_result_df(self, trim=False, max_relative_slope_error=500.0,
max_negative_step=0.05, min_interval_length=7):
'''
Calculates self.result_df, a pandas dataframe summarizing the soiling
intervals identified and self.analyzed_daily_df, a version of
self.daily_df with additional columns calculated during analysis.
Parameters
----------
trim : bool, default False
whether to trim (remove) the first and last soiling intervals to
avoid inclusion of partial intervals
max_relative_slope_error : float, default 500
the maximum relative size of the slope confidence interval for an
interval to be considered valid (percentage).
max_negative_step : float, default 0.05
The maximum magnitude of negative discrete steps allowed in an
interval for the interval to be considered valid (units of
normalized performance metric).
min_interval_length : int, default 7
The minimum duration for an interval to be considered
valid. Cannot be less than 2 (days).
'''
daily_df = self.daily_df
result_list = []
if trim:
# ignore first and last interval
res_loop = sorted(list(set(daily_df['run'])))[1:-1]
else:
res_loop = sorted(list(set(daily_df['run'])))
for r in res_loop:
run = daily_df[daily_df['run'] == r]
length = (run.day[-1] - run.day[0])
start_day = run.day[0]
end_day = run.day[-1]
start = run.index[0]
end = run.index[-1]
run_filtered = run[run.pi_norm > 0]
# use the filtered version if it contains any points
# otherwise use the unfiltered version to populate a
# valid=False row
if not run_filtered.empty:
run = run_filtered
result_dict = {
'start': start,
'end': end,
'length': length,
'run': r,
'run_slope': 0,
'run_slope_low': 0,
'run_slope_high': 0,
'max_neg_step': min(run.delta),
'start_loss': 1,
'inferred_start_loss': run.pi_norm.mean(),
'inferred_end_loss': run.pi_norm.mean(),
'valid': False
}
if len(run) > min_interval_length and run.pi_norm.sum() > 0:
fit = theilslopes(run.pi_norm, run.day)
fit_poly = np.poly1d(fit[0:2])
result_dict['run_slope'] = fit[0]
result_dict['run_slope_low'] = fit[2]
result_dict['run_slope_high'] = min([0.0, fit[3]])
result_dict['inferred_start_loss'] = fit_poly(start_day)
result_dict['inferred_end_loss'] = fit_poly(end_day)
result_dict['valid'] = True
result_list.append(result_dict)
results = pd.DataFrame(result_list)
if results.empty:
raise NoValidIntervalError('No valid soiling intervals were found')
# Filter results for each interval,
# setting invalid interval to slope of 0
results['slope_err'] = (
results.run_slope_high - results.run_slope_low)\
/ abs(results.run_slope)
# critera for exclusions
filt = (
(results.run_slope > 0) |
(results.slope_err >= max_relative_slope_error / 100.0) |
(results.max_neg_step <= -1.0 * max_negative_step)
)
results.loc[filt, 'run_slope'] = 0
results.loc[filt, 'run_slope_low'] = 0
results.loc[filt, 'run_slope_high'] = 0
results.loc[filt, 'valid'] = False
# Calculate the next inferred start loss from next valid interval
results['next_inferred_start_loss'] = np.clip(
results[results.valid].inferred_start_loss.shift(-1),
0, 1)
# Calculate the inferred recovery at the end of each interval
results['inferred_recovery'] = np.clip(
results.next_inferred_start_loss - results.inferred_end_loss,
0, 1)
if len(results[results.valid]) == 0:
raise NoValidIntervalError('No valid soiling intervals were found')
new_start = results.start.iloc[0]
new_end = results.end.iloc[-1]
pm_frame_out = daily_df[new_start:new_end]
pm_frame_out = pm_frame_out.reset_index() \
.merge(results, how='left', on='run') \
.set_index('date')
pm_frame_out['loss_perfect_clean'] = np.nan
pm_frame_out['loss_inferred_clean'] = np.nan
pm_frame_out['days_since_clean'] = \
(pm_frame_out.index - pm_frame_out.start).dt.days
# Calculate the daily derate
pm_frame_out['loss_perfect_clean'] = \
pm_frame_out.start_loss + \
pm_frame_out.days_since_clean * pm_frame_out.run_slope
# filling the flat intervals may need to be recalculated
# for different assumptions
pm_frame_out.loss_perfect_clean = \
pm_frame_out.loss_perfect_clean.fillna(1)
pm_frame_out['loss_inferred_clean'] = \
pm_frame_out.inferred_start_loss + \
pm_frame_out.days_since_clean * pm_frame_out.run_slope
# filling the flat intervals may need to be recalculated
# for different assumptions
pm_frame_out.loss_inferred_clean = \
pm_frame_out.loss_inferred_clean.fillna(1)
self.result_df = results
self.analyzed_daily_df = pm_frame_out
def _calc_monte(self, monte, method='half_norm_clean'):
'''
Runs the Monte Carlo step of the SRR method. Calculates
self.random_profiles, a list of the random soiling profiles realized in
the calculation, and self.monte_losses, a list of the
insolation-weighted soiling ratios associated with the realizations.
Parameters
----------
monte : int
number of Monte Carlo simulations to run
method : str, {'half_norm_clean', 'random_clean', 'perfect_clean'} \
default 'half_norm_clean'
How to treat the recovery of each cleaning event
* 'random_clean' - a random recovery between 0-100%
* 'perfect_clean' - each cleaning event returns the performance
metric to 1
* 'half_norm_clean' - The starting point of each interval is taken
randomly from a half normal distribution with its
mode (mu) at 1 and
its sigma equal to 1/3 * (1-b) where b is the intercept
of the fit to the interval.
'''
# Raise a warning if there is >20% invalid data
if (method == 'half_norm_clean') or (method == 'random_clean'):
valid_fraction = self.analyzed_daily_df['valid'].mean()
if valid_fraction <= 0.8:
warnings.warn('20% or more of the daily data is assigned to invalid soiling '
'intervals. This can be problematic with the "half_norm_clean" '
'and "random_clean" cleaning assumptions. Consider more permissive '
'validity criteria such as increasing "max_relative_slope_error" '
'and/or "max_negative_step" and/or decreasing "min_interval_length".'
' Alternatively, consider using method="perfect_clean". For more'
' info see https://github.com/NREL/rdtools/issues/272'
)
monte_losses = []
random_profiles = []
for _ in range(monte):
results_rand = self.result_df.copy()
df_rand = self.analyzed_daily_df.copy()
# only really need this column from the original frame:
df_rand = df_rand[['insol', 'run']]
results_rand['run_slope'] = \
np.random.uniform(results_rand.run_slope_low,
results_rand.run_slope_high)
results_rand['run_loss'] = \
results_rand.run_slope * results_rand.length
results_rand['end_loss'] = np.nan
results_rand['start_loss'] = np.nan
# Make groups that start with a valid interval and contain
# subsequent invalid intervals
group_list = []
group = 0
for x in results_rand.valid:
if x:
group += 1
group_list.append(group)
results_rand['group'] = group_list
# randomize the extent of the cleaning
inter_start = 1.0
start_list = []
if (method == 'half_norm_clean') or (method == 'random_clean'):
# Randomize recovery of valid intervals only
valid_intervals = results_rand[results_rand.valid].copy()
valid_intervals['inferred_recovery'] = \
valid_intervals.inferred_recovery.fillna(1.0)
end_list = []
for i, row in valid_intervals.iterrows():
start_list.append(inter_start)
end = inter_start + row.run_loss
end_list.append(end)
if method == 'half_norm_clean':
# Use a half normal with the inferred clean at the
# 3sigma point
x = np.clip(end + row.inferred_recovery, 0, 1)
inter_start = 1 - abs(np.random.normal(0.0, (1 - x) / 3))
elif method == 'random_clean':
inter_start = np.random.uniform(end, 1)
# Update the valid rows in results_rand
valid_update = pd.DataFrame()
valid_update['start_loss'] = start_list
valid_update['end_loss'] = end_list
valid_update.index = valid_intervals.index
results_rand.update(valid_update)
# forward and back fill to note the limits of random constant
# derate for invalid intervals
results_rand['previous_end'] = \
results_rand.end_loss.fillna(method='ffill')
results_rand['next_start'] = \
results_rand.start_loss.fillna(method='bfill')
# Randomly select random constant derate for invalid intervals
# based on previous end and next beginning
invalid_intervals = results_rand[~results_rand.valid].copy()
                # fill NaNs at beginning and end
invalid_intervals.previous_end.fillna(1.0, inplace=True)
invalid_intervals.next_start.fillna(1.0, inplace=True)
groups = set(invalid_intervals.group)
replace_levels = []
if len(groups) > 0:
for g in groups:
rows = invalid_intervals[invalid_intervals.group == g]
n = len(rows)
low = rows.iloc[0].previous_end
high = rows.iloc[0].next_start
level = np.random.uniform(low, high)
replace_levels.append(np.full(n, level))
# Update results rand with the invalid rows
replace_levels = np.concatenate(replace_levels)
invalid_update = pd.DataFrame()
invalid_update['start_loss'] = replace_levels
invalid_update.index = invalid_intervals.index
results_rand.update(invalid_update)
elif method == 'perfect_clean':
for i, row in results_rand.iterrows():
start_list.append(inter_start)
end = inter_start + row.run_loss
inter_start = 1
results_rand['start_loss'] = start_list
else:
raise ValueError("Invalid method specification")
df_rand = df_rand.reset_index() \
.merge(results_rand, how='left', on='run') \
.set_index('date')
df_rand['loss'] = np.nan
df_rand['days_since_clean'] = \
(df_rand.index - df_rand.start).dt.days
df_rand['loss'] = df_rand.start_loss + \
df_rand.days_since_clean * df_rand.run_slope
df_rand['soil_insol'] = df_rand.loss * df_rand.insol
soiling_ratio = (
df_rand.soil_insol.sum() / df_rand.insol[
~df_rand.soil_insol.isnull()].sum()
)
monte_losses.append(soiling_ratio)
random_profile = df_rand['loss'].copy()
random_profile.name = 'stochastic_soiling_profile'
random_profiles.append(random_profile)
self.random_profiles = random_profiles
self.monte_losses = monte_losses
def run(self, reps=1000, day_scale=13, clean_threshold='infer',
trim=False, method='half_norm_clean',
clean_criterion='shift', precip_threshold=0.01, min_interval_length=7,
exceedance_prob=95.0, confidence_level=68.2, recenter=True,
max_relative_slope_error=500.0, max_negative_step=0.05, outlier_factor=1.5):
'''
Run the SRR method from beginning to end. Perform the stochastic rate
and recovery soiling loss calculation. Based on the methods presented
in Deceglie et al. "Quantifying Soiling Loss Directly From PV Yield"
JPV 8(2) p547 2018.
Parameters
----------
reps : int, default 1000
number of Monte Carlo realizations to calculate
day_scale : int, default 13
The number of days to use in rolling median for cleaning detection,
and the maximum number of days of missing data to tolerate in a
valid interval. An odd value is recommended.
clean_threshold : float or 'infer', default 'infer'
The fractional positive shift in rolling median for cleaning
detection. Or specify 'infer' to automatically use outliers in the
shift as the threshold.
trim : bool, default False
Whether to trim (remove) the first and last soiling intervals to
avoid inclusion of partial intervals
method : str, {'half_norm_clean', 'random_clean', 'perfect_clean'} \
default 'half_norm_clean'
How to treat the recovery of each cleaning event
* 'random_clean' - a random recovery between 0-100%
* 'perfect_clean' - each cleaning event returns the performance
metric to 1
* 'half_norm_clean' - The starting point of each interval is taken
randomly from a half normal distribution with its mode (mu) at 1 and
its sigma equal to 1/3 * (1-b) where b is the intercept of the fit to
the interval.
clean_criterion : str, {'shift', 'precip_and_shift', 'precip_or_shift', 'precip'} \
default 'shift'
The method of partitioning the dataset into soiling intervals
* 'precip_and_shift' - rolling median shifts must coincide
with precipitation to be a valid cleaning event.
* 'precip_or_shift' - rolling median shifts and precipitation
events are each sufficient on their own to be a cleaning event.
* 'shift', only rolling median shifts are treated as cleaning events.
* 'precip', only precipitation events are treated as cleaning events.
precip_threshold : float, default 0.01
The daily precipitation threshold for defining
precipitation cleaning events.
Units must be consistent with ``self.precipitation_daily``
min_interval_length : int, default 7
The minimum duration for an interval to be considered
valid. Cannot be less than 2 (days).
exceedance_prob : float, default 95.0
The probability level to use for exceedance value calculation in
percent
confidence_level : float, default 68.2
The size of the confidence interval to return, in percent
recenter : bool, default True
Specify whether data is centered to normalized yield of 1 based on
first year median
max_relative_slope_error : float, default 500
the maximum relative size of the slope confidence interval for an
interval to be considered valid (percentage).
max_negative_step : float, default 0.05
The maximum magnitude of negative discrete steps allowed in an
interval for the interval to be considered valid (units of
normalized performance metric).
outlier_factor : float, default 1.5
The factor used in the Tukey fence definition of outliers for flagging positive shifts
in the rolling median used for cleaning detection. A smaller value will cause more and
smaller shifts to be classified as cleaning events.
Returns
-------
insolation_weighted_soiling_ratio : float
P50 insolation-weighted soiling ratio based on stochastic rate and
recovery analysis
confidence_interval : numpy.array
confidence interval (size specified by confidence_level) of
insolation-weighted soiling ratio
calc_info : dict
* 'renormalizing_factor' - value used to recenter data
* 'exceedance_level' - the insolation-weighted soiling ratio that
was outperformed with probability of exceedance_prob
* 'stochastic_soiling_profiles' - List of Pandas series
corresponding to the Monte Carlo realizations of soiling ratio
profiles
* 'soiling_ratio_perfect_clean' - Pandas series of the soiling
ratio during valid soiling intervals assuming perfect cleaning
and P50 slopes
* 'soiling_interval_summary' - Pandas dataframe summarizing the
soiling intervals identified. The columns of the dataframe are
as follows:
+------------------------+----------------------------------------------+
| Column Name | Description |
+========================+==============================================+
| 'start' | Start timestamp of the soiling interval |
+------------------------+----------------------------------------------+
| 'end' | End timestamp of the soiling interval |
+------------------------+----------------------------------------------+
| 'soiling_rate' | P50 Soiling rate for interval, in day^−1 |
| | Negative value indicates soiling is |
| | occurring. E.g. a rate of −0.01 indicates 1% |
| | soiling loss per day. |
+------------------------+----------------------------------------------+
| 'soiling_rate_low' | Low edge of confidence interval for soiling |
| | rate for interval, in day^−1 |
+------------------------+----------------------------------------------+
| 'soiling_rate_high' | High edge of confidence interval for |
| | soiling rate for interval, in day^−1 |
+------------------------+----------------------------------------------+
| 'inferred_start_loss' | Estimated performance metric at the start |
| | of the interval |
+------------------------+----------------------------------------------+
| 'inferred_end_loss' | Estimated performance metric at the end |
| | of the interval |
+------------------------+----------------------------------------------+
| 'length' | Number of days in the interval |
+------------------------+----------------------------------------------+
| 'valid' | Whether the interval meets the criteria to |
| | be treated as a valid soiling interval |
+------------------------+----------------------------------------------+
'''
self._calc_daily_df(day_scale=day_scale,
clean_threshold=clean_threshold,
recenter=recenter,
clean_criterion=clean_criterion,
precip_threshold=precip_threshold,
outlier_factor=outlier_factor)
self._calc_result_df(trim=trim,
max_relative_slope_error=max_relative_slope_error,
max_negative_step=max_negative_step,
min_interval_length=min_interval_length)
self._calc_monte(reps, method=method)
# Calculate the P50 and confidence interval
half_ci = confidence_level / 2.0
result = np.percentile(self.monte_losses,
[50,
50.0 - half_ci,
50.0 + half_ci,
100 - exceedance_prob])
P_level = result[3]
# Construct calc_info output
intervals_out = self.result_df[
['start', 'end', 'run_slope', 'run_slope_low',
'run_slope_high', 'inferred_start_loss', 'inferred_end_loss',
'length', 'valid']].copy()
intervals_out.rename(columns={'run_slope': 'soiling_rate',
'run_slope_high': 'soiling_rate_high',
'run_slope_low': 'soiling_rate_low',
}, inplace=True)
df_d = self.analyzed_daily_df
sr_perfect = df_d[df_d['valid']]['loss_perfect_clean']
calc_info = {
'exceedance_level': P_level,
'renormalizing_factor': self.renorm_factor,
'stochastic_soiling_profiles': self.random_profiles,
'soiling_interval_summary': intervals_out,
'soiling_ratio_perfect_clean': sr_perfect
}
return (result[0], result[1:3], calc_info)
def soiling_srr(energy_normalized_daily, insolation_daily, reps=1000,
precipitation_daily=None, day_scale=13, clean_threshold='infer',
trim=False, method='half_norm_clean',
clean_criterion='shift', precip_threshold=0.01, min_interval_length=7,
exceedance_prob=95.0, confidence_level=68.2, recenter=True,
max_relative_slope_error=500.0, max_negative_step=0.05, outlier_factor=1.5):
'''
Functional wrapper for :py:class:`~rdtools.soiling.SRRAnalysis`. Perform
the stochastic rate and recovery soiling loss calculation. Based on the
methods presented in Deceglie et al. JPV 8(2) p547 2018.
Parameters
----------
energy_normalized_daily : pandas.Series
Daily performance metric (i.e. performance index, yield, etc.)
Alternatively, the soiling ratio output of a soiling sensor (e.g. the
photocurrent ratio between matched dirty and clean PV reference cells).
In either case, data should be insolation-weighted daily aggregates.
insolation_daily : pandas.Series
Daily plane-of-array insolation corresponding to
`energy_normalized_daily`. Arbitrary units.
reps : int, default 1000
number of Monte Carlo realizations to calculate
precipitation_daily : pandas.Series, default None
Daily total precipitation. Units ambiguous but should be the same as
precip_threshold. Note default behavior of precip_threshold. (Ignored
if ``clean_criterion='shift'``.)
day_scale : int, default 13
The number of days to use in rolling median for cleaning detection,
and the maximum number of days of missing data to tolerate in a valid
interval. An odd value is recommended.
clean_threshold : float or 'infer', default 'infer'
The fractional positive shift in rolling median for cleaning detection.
Or specify 'infer' to automatically use outliers in the shift as the
threshold.
trim : bool, default False
Whether to trim (remove) the first and last soiling intervals to avoid
inclusion of partial intervals
method : str, {'half_norm_clean', 'random_clean', 'perfect_clean'} \
default 'half_norm_clean'
How to treat the recovery of each cleaning event
* 'random_clean' - a random recovery between 0-100%
* 'perfect_clean' - each cleaning event returns the performance
metric to 1
* 'half_norm_clean' - The starting point of each interval is taken
randomly from a half normal distribution with its mode (mu) at 1 and
its sigma equal to 1/3 * (1-b) where b is the intercept of the fit to
the interval.
clean_criterion : str, {'shift', 'precip_and_shift', 'precip_or_shift', 'precip'} \
default 'shift'
The method of partitioning the dataset into soiling intervals
* 'precip_and_shift' - rolling median shifts must coincide
with precipitation to be a valid cleaning event.
* 'precip_or_shift' - rolling median shifts and precipitation
events are each sufficient on their own to be a cleaning event.
* 'shift', only rolling median shifts are treated as cleaning events.
* 'precip', only precipitation events are treated as cleaning events.
precip_threshold : float, default 0.01
The daily precipitation threshold for defining precipitation
cleaning events. Units must be consistent with precip.
min_interval_length : int, default 7
The minimum duration, in days, for an interval to be considered
valid. Cannot be less than 2 (days).
exceedance_prob : float, default 95.0
the probability level to use for exceedance value calculation in
percent
confidence_level : float, default 68.2
the size of the confidence interval to return, in percent
recenter : bool, default True
specify whether data is centered to normalized yield of 1 based on
first year median
max_relative_slope_error : float, default 500.0
the maximum relative size of the slope confidence interval for an
interval to be considered valid (percentage).
max_negative_step : float, default 0.05
The maximum magnitude of negative discrete steps allowed in an interval
for the interval to be considered valid (units of normalized
performance metric).
outlier_factor : float, default 1.5
The factor used in the Tukey fence definition of outliers for flagging positive shifts
in the rolling median used for cleaning detection. A smaller value will cause more and
smaller shifts to be classified as cleaning events.
Returns
-------
insolation_weighted_soiling_ratio : float
P50 insolation weighted soiling ratio based on stochastic rate and
recovery analysis
confidence_interval : numpy.array
confidence interval (size specified by ``confidence_level``) of
degradation rate estimate
calc_info : dict
* 'renormalizing_factor' - value used to recenter data
* 'exceedance_level' - the insolation-weighted soiling ratio that
was outperformed with probability of exceedance_prob
* 'stochastic_soiling_profiles' - List of Pandas series
corresponding to the Monte Carlo realizations of soiling ratio
profiles
* 'soiling_ratio_perfect_clean' - Pandas series of the soiling
ratio during valid soiling intervals assuming perfect cleaning
and P50 slopes
* 'soiling_interval_summary' - Pandas dataframe summarizing the
soiling intervals identified. The columns of the dataframe are
as follows:
+------------------------+----------------------------------------------+
| Column Name | Description |
+========================+==============================================+
| 'start' | Start timestamp of the soiling interval |
+------------------------+----------------------------------------------+
| 'end' | End timestamp of the soiling interval |
+------------------------+----------------------------------------------+
| 'soiling_rate' | P50 Soiling rate for interval, in day^−1 |
| | Negative value indicates soiling is |
| | occurring. E.g. a rate of −0.01 indicates 1% |
| | soiling loss per day. |
+------------------------+----------------------------------------------+
| 'soiling_rate_low' | Low edge of confidence interval for soiling |
| | rate for interval, in day^−1 |
+------------------------+----------------------------------------------+
| 'soiling_rate_high' | High edge of confidence interval for |
| | soiling rate for interval, in day^−1 |
+------------------------+----------------------------------------------+
| 'inferred_start_loss' | Estimated performance metric at the start |
| | of the interval |
+------------------------+----------------------------------------------+
| 'inferred_end_loss' | Estimated performance metric at the end |
| | of the interval |
+------------------------+----------------------------------------------+
| 'length' | Number of days in the interval |
+------------------------+----------------------------------------------+
| 'valid' | Whether the interval meets the criteria to |
| | be treated as a valid soiling interval |
+------------------------+----------------------------------------------+
'''
srr = SRRAnalysis(energy_normalized_daily,
insolation_daily,
precipitation_daily=precipitation_daily)
sr, sr_ci, soiling_info = srr.run(
reps=reps,
day_scale=day_scale,
clean_threshold=clean_threshold,
trim=trim,
method=method,
clean_criterion=clean_criterion,
precip_threshold=precip_threshold,
min_interval_length=min_interval_length,
exceedance_prob=exceedance_prob,
confidence_level=confidence_level,
recenter=recenter,
max_relative_slope_error=max_relative_slope_error,
max_negative_step=max_negative_step,
outlier_factor=outlier_factor)
return sr, sr_ci, soiling_info
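# Minimal usage sketch for soiling_srr() on synthetic data; the constant insolation and
# the noisy 0.1%-per-day sawtooth soiling signal below are invented purely for illustration.
def _soiling_srr_example():
    times = pd.date_range('2019-01-01', periods=365, freq='D')
    sawtooth = 1 - 0.001 * (np.arange(len(times)) % 60)   # soiling reset ("cleaned") every 60 days
    rng = np.random.default_rng(0)
    energy_normalized = pd.Series(sawtooth + rng.normal(0, 0.002, len(times)), index=times)
    insolation = pd.Series(6.0, index=times)              # arbitrary constant units
    sr, sr_ci, info = soiling_srr(energy_normalized, insolation, reps=100)
    return sr, sr_ci, info['soiling_interval_summary']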
def _count_month_days(start, end):
'''Return a dict of number of days between start and end
(inclusive) in each month'''
    days = pd.date_range(start, end)
    # per the docstring: tally how many of those days fall in each calendar month (1-12)
    months = [d.month for d in days]
    return {month: months.count(month) for month in range(1, 13)}
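# Example of the helper above: January 2020 contributes 31 days and the first two weeks
# of February add 14 more, so the returned dict maps month 1 -> 31 and month 2 -> 14.
def _count_month_days_example():
    return _count_month_days('2020-01-01', '2020-02-14')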
import pandas as pd
from texthero import representation
from texthero import preprocessing
from . import PandasTestCase
import doctest
import unittest
import string
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(representation))
return tests
class TestRepresentation(PandasTestCase):
"""
Term Frequency.
"""
def test_term_frequency_single_document(self):
s = pd.Series("a b c c")
s_true = pd.Series([[1, 1, 2]])
self.assertEqual(representation.term_frequency(s), s_true)
def test_term_frequency_multiple_documents(self):
s = pd.Series(["doc_one", "doc_two"])
        s_true = pd.Series([[1, 0], [0, 1]])
        self.assertEqual(representation.term_frequency(s), s_true)
from __future__ import division
import pytest
import numpy as np
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat)
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_monotonic_and_unique(self):
assert self.index.is_monotonic
assert self.index.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert idx.is_unique
idx = IntervalIndex.from_tuples([(0, 2), (0, 2)])
assert not idx.is_unique
assert idx.is_monotonic
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
        idx = IntervalIndex.from_arrays([0, 2], [1, 3])
#!/usr/bin/env python
# coding: utf-8
# usage:
# python gen_csv_denoised_pad_train_val.py 200015779
import sys
import pandas as pd
import numpy as np
try:
val_label = sys.argv[1]
except:
print("specify book name for validation")
sys.exit(1)
df_train = pd.read_csv('./input/train_characters.csv', header=None)
df_train.columns = ['Unicode', 'filepath']
uniq_char = df_train.Unicode.unique()
train_df_list = []
val_df_list = []
for i, cur_char in enumerate(uniq_char):
cur_df = df_train[df_train.Unicode == cur_char]
tmp_train = cur_df.drop(cur_df.index[cur_df.filepath.str.contains(val_label)])
tmp_val = cur_df[cur_df.filepath.str.contains(val_label)]
if len(tmp_val) == 0:
        # If the validation book contains no samples of this character, randomly sample up to 20 from the train split
val_count = int(len(tmp_train) * 0.10)
if val_count > 20:
cur_val = tmp_train.sample(20)
tmp_train = tmp_train.drop(cur_val.index)
else:
# characters that occur 20 times or less are also copied to validation
cur_val = cur_df
else:
cur_val = tmp_val
if len(tmp_train) == 0:
        # If the character has no train images (it only occurs in the specified
        # validation book), randomly sample 20 validation images back into train.
train_count = int(len(tmp_val) * 0.10)
if train_count > 20:
cur_train = tmp_val.sample(20)
cur_val = tmp_val.drop(cur_train.index)
else:
# characters that occur 20 times or less are also copied to train
cur_train = cur_df
else:
cur_train = tmp_train
train_df_list.append(cur_train)
val_df_list.append(cur_val)
if i % 100 == 0:
print(".", end='')
sys.stdout.flush()
print("preprocess done!")
train_df = pd.concat(train_df_list)
val_df = pd.concat(val_df_list)
print("postprocess for train data for class contains less than 100 images...")
# Oversample characters that appear less than 100 times more than 100 times
counter = train_df.Unicode.value_counts()
code_and_count = {}
for elem in train_df.Unicode.unique():
if counter[elem] < 100:
code_and_count[elem] = counter[elem]
add_train_df_list = []
for elem, count in code_and_count.items():
multi_count = int(100 / count)
for i in range(multi_count):
add_train_df_list.append(train_df[train_df.Unicode == elem])
add_train_df = pd.concat(add_train_df_list)
train_df = pd.concat([train_df, add_train_df])
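# Worked example of the oversampling factor above (illustrative numbers, added for
# clarity): a character with count = 7 gets multi_count = int(100 / 7) = 14 extra
# copies of its rows appended, ending with 7 * (1 + 14) = 105 >= 100 occurrences.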
print("done!")
print("postprocess for validation data for class contains less than 20 images...")
# Oversample characters that appear less than 20 times more than 20 times
counter = val_df.Unicode.value_counts()
code_and_count = {}
for elem in val_df.Unicode.unique():
if counter[elem] < 20:
code_and_count[elem] = counter[elem]
add_val_df_list = []
for elem, count in code_and_count.items():
multi_count = int(20 / count)
for i in range(multi_count):
add_val_df_list.append(val_df[val_df.Unicode == elem])
print("done!")
print("finalizing...")
add_val_df = | pd.concat(add_val_df_list) | pandas.concat |
# Copyright 2020, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from typing import Dict, List
import yaml
import os
def _read_IMARIS_cell_migration_data(filename):
'''
'''
if (not os.path.isfile(filename)):
return pd.DataFrame()
print("Open file: %s" % filename)
sep = _count_delimiters(open(filename, "r", encoding="ISO-8859-1").read())
num_lines = sum(1 for line in open(filename, encoding="ISO-8859-1"))
if num_lines < 10:
return pd.DataFrame()
f = open(filename, "r", encoding="ISO-8859-1")
skip_row = 0
for line in f.readlines():
row = line.split(sep)
if len(row) > 0:
if row[0] == "Variable":
break
if row[0] == "Position X":
break
skip_row += 1
f.close()
if skip_row > 10:
return pd.DataFrame()
df_stat = pd.read_csv(filename, skiprows=skip_row, sep=sep, encoding="ISO-8859-1")
return df_stat
def _count_delimiters(s):
valid_seps = [' ', '|', ',', ';', '\t']
cnt = {' ': 0, '|': 0, ',': 0, ';': 0, '\t': 0}
for c in s:
if c in valid_seps: cnt[c] = cnt[c] + 1
tup = [(value, key) for key, value in cnt.items()]
if (cnt[';'] > 0):
print("File contains semicolons, check if opened correctly! %s")
print(cnt)
return max(tup)[1]
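# Illustrative example (added, not part of the original pipeline):
# _count_delimiters("x;y;z\n1;2;3") tallies {';': 4, ...} and returns ';',
# which is then handed to pandas.read_csv as the separator.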
def CMSO_movement_data(imaris_key_file: pd.DataFrame, parameters: Dict, start_time, end_time) -> pd.DataFrame:
processed_key_file = imaris_key_file.copy()
imaris_data = dict()
unified_imaris = dict()
object_data = dict()
link_data = dict()
tracking_data = dict()
json_meta = dict()
#json_meta["test.json"] = {"cmso_space_unit": "micron", "cmso_time_unit": "frame"}
object_data_statistics = pd.DataFrame()
counter = 1
with open("./conf/base/catalog.yml") as file:
catalog_dict = yaml.load(file, Loader=yaml.FullLoader)
cmso_object_dir = catalog_dict['CMSO_object_data']['filepath']
cmso_link_dir = catalog_dict['CMSO_link_data']['filepath']
cmso_track_dir = catalog_dict['CMSO_track_data']['filepath']
for fish_number in imaris_key_file["fish_number"].unique():
if (np.isnan(fish_number)):
continue
df_single_fish_all_groups = imaris_key_file[imaris_key_file['fish_number'] == fish_number]
for analysis_group in df_single_fish_all_groups["analysis_group"].unique():
df_single_fish = df_single_fish_all_groups[df_single_fish_all_groups["analysis_group"] == analysis_group]
print("=================================================")
print("fish number: %s" % int(fish_number))
print("=================================================")
print(df_single_fish)
object_filename = 'objects_fish_%s_%s.csv' % (int(fish_number), analysis_group)
            #object_data[filename] = objects_df
object_data_statistics.at[counter, "filename"] = object_filename
for index, row in df_single_fish.iterrows():
filename = parameters["data_dir"] + row["filename"]
#print("FILENAME: %s" % filename)
imaris_df = _read_IMARIS_cell_migration_data(filename)
object_data_statistics.at[counter,"imaris_df.size"] = imaris_df.size
if imaris_df.size <= 1:
print("ERROR: Imaris df too small")
print(imaris_df.head())
else:
imaris_data['tracks_fish_%s_%s_%s.csv' % (
analysis_group, int(fish_number), row["vessel_type"])] = imaris_df
unified_df = _unify_tidy_wide_IMARIS_formats(imaris_df)
print(unified_df.head())
if unified_df.size <= 1:
print("ERROR: Unified df too small")
print(unified_df.head())
else:
object_data_statistics.at[counter, "unified_df.size"] = unified_df.size
imaris_data['tracks_fish_%s_%s_%s.csv' % (
analysis_group, int(fish_number), row["vessel_type"])] = imaris_df
unified_imaris['tracks_fish_%s_%s_%s.csv' % (
analysis_group, int(fish_number), row["vessel_type"])] = unified_df
object_filename = 'objects_fish_%s_%s_%s.csv' % (
analysis_group, int(fish_number), row["vessel_type"])
object_data[object_filename] = unified_df[["object_id", "x", "y", "z", "frame"]]
link_filename = 'link_fish_%s_%s_%s.csv' % (analysis_group, int(fish_number), row["vessel_type"])
link_df = unified_df[["object_id", "track_id"]]
link_df = link_df.rename(columns={"track_id": "link_id"})
link_data[link_filename] = link_df
json_filename = "meta_fish_%s_%s_%s.json" % (analysis_group, int(fish_number), row["vessel_type"])
json_meta[json_filename] = {"cmso_space_unit": "micron",
"cmso_time_unit": "frame",
"name": "cmso_tracks_zebrafish_%s" % analysis_group,
"resources": [
{
"name" : "objects_table_fish_%s_%s_%s" % (
analysis_group, int(fish_number), row["vessel_type"]),
"path": "../CMSO_object_data/%s" % object_filename,
"schema": {
"fields": [
{
"constraints": {
"unique": True
},
"description": "",
"format": "default",
"name": "object_id",
"title": "",
"type": "integer"
},
{
"description": "",
"format": "default",
"name": "frame",
"title": "",
"type": "integer"
},
{
"description": "",
"format": "default",
"name": "x",
"title": "",
"type": "number"
},
{
"description": "",
"format": "default",
"name": "y",
"title": "",
"type": "number"
},
{
"description": "",
"format": "default",
"name": "z",
"title": "",
"type": "number"
},
],
"primaryKey": "cmso_object_id"
}
},
{
"name": "objects_table_fish_%s_%s_%s" % (
analysis_group, int(fish_number),
row["vessel_type"]),
"path": "../CMSO_link_data/%s" % link_filename,
"schema": {
"fields": [
{
"description": "",
"format": "default",
"name": "link_id",
"title": "",
"type": "integer"
},
{
"description": "",
"format": "default",
"name": "object_id",
"title": "",
"type": "integer"
}
],
"foreignKeys": [
{
"fields": "cmso_object_id",
"reference": {
"datapackage": "",
"fields": "object_id",
"resource": "cmso_objects_table"
}
}
]
}
}
]
}
track_counter = 0
track_df = | pd.DataFrame() | pandas.DataFrame |
import json
import os
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import pandas as pd
from pandas.tseries.offsets import DateOffset
def from_download(tok, start_date, end_date, offset_days, series_list):
"""Download and assemble dataset of demand data per balancing authority for desired
date range.
:param str tok: token obtained by registering with EIA.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime start_date: start date.
:param pandas.Timestamp/numpy.datetime64/datetime.datetime end_date: end data.
:param list series_list: list of demand series names provided by EIA, e.g.,
['EBA.AVA-ALL.D.H', 'EBA.AZPS-ALL.D.H'].
:param int offset_days: number of business days for data to stabilize.
:return: (*pandas.DataFrame*) -- data frame with UTC timestamp as indices and
BA series name as column names.
"""
timespan = pd.date_range(
start_date, end_date - DateOffset(days=offset_days), tz="UTC", freq="H"
)
df_all = pd.DataFrame(index=timespan)
for ba in series_list:
print("Downloading", ba)
d = EIAgov(tok, [ba])
df = d.get_data()
if df is not None:
df.index = | pd.to_datetime(df["Date"]) | pandas.to_datetime |
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import random
import socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('3.142.167.4', 15271))
# client.connect(('127.0.0.1', 60000))
import rss22
pid = 2
def rssrd(r, xy):
f = {}
g = {}
R = {}
esend = {}
epk1 = {}
for j in range(rss22.n):
if j+1==pid:
continue
f[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
g[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
R[pid,(j+1)] = random.uniform(11*(10**18),19*(10**18))
for j in range(rss22.n):
if j+1==pid:
continue
prod = f[pid,(j+1)] * r
esend[pid,(j+1)] = ( rss22.public_key.encrypt(prod) , f[pid,(j+1)] )
for j in range(1,4):
if j == pid:
rss22.client_send(esend, client)
print('sent')
else:
print("Ready to receive")
rss22.client_receive(pid, client)
print("Received data")
print(rss22.erecive)
fj = {}
for i in rss22.erecive.keys():
epk1[i[0],i[1]]=( rss22.erecive[i][0] * g[i[1],i[0]] * xy + R[i[1],i[0]] , g[i[1],i[0]] )
fj[i] = rss22.erecive[i][1]
print("fj ",fj,"\n")
print()
for j in range(1,4):
if j == pid:
rss22.epk_send(epk1, client)
else:
rss22.epk_receive(pid, client)
print("Received dat 01a")
print(rss22.epkfinal)
share1 = {}
share2 = {}
for i in rss22.epkfinal.keys():
nr = rss22.private_key.decrypt(rss22.epkfinal[i][0])
dr = rss22.epkfinal[i][1] * f[i]
share1[i] = nr/dr
share2[i] = - R[i] / ( fj[(i[1],i[0])] * g[i] )
print('ok')
t = round(random.uniform((-0.5),(0.5)),6)
si = 0
for i in share1.keys():
si += share1[i] + share2[i] + ( r + t ) * xy
rss22.s = []
for j in range(1,4):
if j == pid:
rss22.si_send(si, client)
else:
rss22.si_receive(client)
rss22.s.append(si)
print(rss22.s)
return sum(rss22.s)
def rss(d):
x, y = d['x'], d['y']
alphax = round(random.uniform((-0.5),(0.5)),6)
alphay = round(random.uniform((-0.5),(0.5)),6)
x = x + alphax
y = y + alphay
r = round(random.uniform(3000,4000),6)
sx = rssrd(r, x)
sy = rssrd(r, y)
return sx/sy
def fdrt(dat, alpha, beta):
cos_alpha = round(math.cos(alpha),4)
sin_alpha = round(math.sin(alpha), 4)
cos_beta = round(math.cos(beta),4)
sin_beta = round(math.sin(beta), 4)
x = [[cos_alpha,-sin_alpha, 0, 0],
[ sin_alpha, cos_alpha, 0, 0],
[ 0, 0, cos_beta, -sin_beta],
[ 0, 0, sin_beta, cos_beta]
]
length=len(dat.columns)
cnt = length//4
if length % 4>0:
cnt+=1
i=0
j=4
k=0
while k<cnt:
if j<length:
dat4 = dat.iloc[:,i:j]
else:
dat4 = dat.iloc[:,-4:]
i=length-4
j=length
dat4 = dat4.values.tolist()
prod = np.dot(dat4,x)
dat.iloc[:,i:j] = prod
i=j
j+=4
k+=1
return dat
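# Illustrative sketch (added): fdrt applies the 4x4 two-plane rotation built above to
# the dataframe's columns in blocks of four. The toy data and angles are arbitrary
# examples, not values used elsewhere in this script.
def _example_fdrt():
    toy = pd.DataFrame(np.random.rand(5, 8))  # 8 columns -> two 4-column blocks
    return fdrt(toy.copy(), alpha=0.3, beta=0.7)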
###########################################################################
data = pd.read_csv('Test/cryo3.csv')
ndata = data.iloc[:,:-1].select_dtypes(include=np.number)
print(ndata)
dat = ndata.apply(lambda x: 5*(x - x.min()) / (x.max() - x.min()))
print(dat)
length=len(dat.columns)
cnt = length//4
if length % 4>0:
cnt+=1
i=0
j=4
k=0
while k<cnt:
if j<length:
dat4 = dat.iloc[:,i:j]
i=j
j+=4
else:
dat4 = dat.iloc[:,-4:]
rot = | pd.DataFrame(columns=[0,1,2,3]) | pandas.DataFrame |
from __future__ import print_function
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import shutil
import copy
from time import time
from datetime import timedelta
import h5py
tf.compat.v1.disable_eager_execution()
'''
CHRONOS: population modeling of CRISPR readcount data
<NAME> (<EMAIL>)
The Broad Institute
'''
def write_hdf5(df, filename):
if os.path.exists(filename):
os.remove(filename)
dest = h5py.File(filename, 'w')
try:
dim_0 = [x.encode('utf8') for x in df.index]
dim_1 = [x.encode('utf8') for x in df.columns]
dest_dim_0 = dest.create_dataset('dim_0', track_times=False, data=dim_0)
dest_dim_1 = dest.create_dataset('dim_1', track_times=False, data=dim_1)
dest.create_dataset("data", track_times=False, data=df.values)
finally:
dest.close()
def read_hdf5(filename):
src = h5py.File(filename, 'r')
try:
dim_0 = [x.decode('utf8') for x in src['dim_0']]
dim_1 = [x.decode('utf8') for x in src['dim_1']]
data = np.array(src['data'])
return pd.DataFrame(index=dim_0, columns=dim_1, data=data)
finally:
src.close()
def extract_last_reps(sequence_map):
'''get the sequence IDs of replicates at their last measured timepoint'''
rep_map = sequence_map[sequence_map.cell_line_name != 'pDNA']
last_days = rep_map.groupby('cell_line_name').days.max()
last_reps = rep_map[rep_map.days == last_days.loc[rep_map.cell_line_name].values].sequence_ID
return last_reps
def check_inputs(readcounts=None, guide_gene_map=None, sequence_map=None):
keys = None
sequence_expected = set(['sequence_ID', 'cell_line_name', 'days', 'pDNA_batch'])
guide_expected = set(['sgrna', 'gene'])
for name, entry in zip(['readcounts', 'guide_gene_map', 'sequence_map'], [readcounts, guide_gene_map, sequence_map]):
if entry is None:
continue
if not isinstance(entry, dict):
raise ValueError("Expected dict, but received %r" %entry)
if keys is None:
keys = set(entry.keys())
else:
if not set(entry.keys()) == keys:
raise ValueError("The keys for %s (%r) do not match the other keys found (%r)" % (name, keys, set(entry.keys)))
for key, val in entry.items():
if not isinstance(val, pd.DataFrame):
raise ValueError('expected Pandas dataframe for %s[%r]' %(name, key))
if name == 'readcounts':
assert val.index.duplicated().sum() == 0, "duplicated sequence IDs for readcounts %r" %key
assert not val.isnull().all(axis=1).any(), \
"All readcounts are null for one or more replicates in %s, please drop them" % key
assert not val.isnull().all(axis=0).any(),\
"All readcounts are null for one or more guides in %s, please drop them" % key
elif name == 'guide_gene_map':
assert not guide_expected - set(val.columns), \
"not all expected columns %r found for guide-gene map for %s. Found %r" %(guide_expected, key, val.columns)
assert val.sgrna.duplicated().sum() == 0, "duplicated sgRNAs for guide-gene map %r. Multiple gene alignments for sgRNAs are not supported." %key
elif name == 'sequence_map':
assert not sequence_expected - set(val.columns), \
"not all expected columns %r found for sequence map for %s. Found %r" %(sequence_expected, key, val.columns)
assert val.sequence_ID.duplicated().sum() == 0, "duplicated sequence IDs for sequence map %r" %key
for batch in val.query('cell_line_name != "pDNA"').pDNA_batch.unique():
assert batch in val.query('cell_line_name == "pDNA"').pDNA_batch.values, \
"there are sequences with pDNA batch %s in library %s, but no pDNA measurements for that batch" %(batch, key)
if val.days.max() > 50:
print("\t\t\tWARNING: many days (%1.2f) found for %s.\n\t\t\tThis may cause numerical issues in fitting the model.\n\
Consider rescaling all days by a constant factor so the max is less than 50." % (val.days.max(), key))
for key in keys:
if not readcounts is None and not sequence_map is None:
assert not set(readcounts[key].index) ^ set(sequence_map[key].sequence_ID), \
"\t\t\t mismatched sequence IDs between readcounts and sequence map for %r.\n\
Chronos expects `readcounts` to have guides as columns, sequence IDs as rows.\n\
Is your data transposed?" %key
if not readcounts is None and not guide_gene_map is None:
assert not set(readcounts[key].columns) ^ set(guide_gene_map[key].sgrna), \
"mismatched map keys between readcounts and guide map for %s" % key
def filter_guides(guide_gene_map, max_guides=15):
'''
removes sgRNAs that target multiple genes, then genes that have less than two guides.
Parameters:
`guide_gene_map` (`pandas.DataFrame`): See Model.__init__ for formatting of guide_gene_map
Returns:
`pandas.DataFrame`: filtered guide_gene_map
'''
alignment_counts = guide_gene_map.groupby("sgrna").gene.count()
guide_gene_map = guide_gene_map[guide_gene_map['sgrna'].isin(alignment_counts.loc[lambda x: x == 1].index)]
guide_counts = guide_gene_map.groupby('gene')['sgrna'].count()
guide_gene_map = guide_gene_map[guide_gene_map.gene.isin(guide_counts.loc[lambda x: (x > 1)& (x <= max_guides)].index)]
return guide_gene_map
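# Illustrative sketch (added, not part of the original module): expected behaviour of
# filter_guides on a toy guide-gene map. The sgRNA and gene names are invented.
def _example_filter_guides():
    toy_map = pd.DataFrame({
        'sgrna': ['g1', 'g2', 'g3', 'g3', 'g4'],
        'gene': ['A', 'A', 'A', 'B', 'B'],
    })
    # 'g3' aligns to two genes and is dropped; gene 'B' is then left with a single
    # guide and is dropped too, so only gene 'A' (guides g1, g2) remains.
    return filter_guides(toy_map)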
def calculate_fold_change(readcounts, sequence_map):
'''
Calculates fold change as the ratio of the RPM+1 of late time points to pDNA
Parameters:
readcounts (`pandas.DataFrame`): readcount matrix with replicates on rows, guides on columns
sequence_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
returns:
fold_change (`pd.DataFrame`)
'''
check_inputs(readcounts={'default': readcounts}, sequence_map={'default': sequence_map})
reps = sequence_map.query('cell_line_name != "pDNA"').sequence_ID
pdna = sequence_map.query('cell_line_name == "pDNA"').sequence_ID
rpm = pd.DataFrame(
(1e6 * readcounts.values.T / readcounts.sum(axis=1).values + 1).T,
index=readcounts.index, columns=readcounts.columns
)
fc = rpm.loc[reps]
norm = rpm.loc[pdna].groupby(sequence_map.set_index('sequence_ID')['pDNA_batch']).median()
try:
fc = pd.DataFrame(fc.values/norm.loc[sequence_map.set_index('sequence_ID').loc[reps, 'pDNA_batch']].values,
index=fc.index, columns=fc.columns
)
except Exception as e:
print(fc.iloc[:3, :3],'\n')
print(norm[:3], '\n')
print(reps[:3], '\n')
print(sequence_map[:3], '\n')
raise e
errors = []
# if np.sum(fc.values <= 0) > 0:
# errors.append("Fold change has zero or negative values:\n%r\n" % fc[fc <= 0].stack()[:10])
# if (fc.min(axis=1) >= 1).any():
# errors.append("Fold change has no values less than 1 for replicates\n%r" % fc.min(axis=1).loc[lambda x: x>= 1])
if errors:
raise RuntimeError('\n'.join(errors))
return fc
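# Illustrative sketch (added for clarity): a toy call to calculate_fold_change.
# The sequence IDs, batch name, cell line name and counts below are invented.
def _example_fold_change():
    toy_readcounts = pd.DataFrame(
        [[100, 900], [50, 950], [10, 990]],
        index=['pdna_1', 'rep_1', 'rep_2'],
        columns=['guide_a', 'guide_b'],
    )
    toy_sequence_map = pd.DataFrame({
        'sequence_ID': ['pdna_1', 'rep_1', 'rep_2'],
        'cell_line_name': ['pDNA', 'line_1', 'line_1'],
        'days': [0, 14, 21],
        'pDNA_batch': ['batch0', 'batch0', 'batch0'],
    })
    # Each late replicate is converted to reads-per-million (+1), then divided by the
    # median pDNA RPM of its batch, giving one fold-change row per late replicate.
    return calculate_fold_change(toy_readcounts, toy_sequence_map)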
def nan_outgrowths(readcounts, sequence_map, guide_gene_map, absolute_cutoff=2, gap_cutoff=2):
'''
NaNs readcounts in cases where all of the following are true:
- The value for the guide/replicate pair corresponds to the most positive log fold change of all guides and all replicates for a cell line
- The logfold change for the guide/replicate pair is greater than `absolute_cutoff`
- The difference between the lfc for this pair and the next most positive pair for that gene and cell line is greater than gap_cutoff
Readcounts are mutated in place.
Parameters:
readcounts (`pandas.DataFrame`): readcount matrix with replicates on rows, guides on columns
sequence_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
guide_gene_map (`pandas.DataFrame`): has string columns "sequence_ID", "cell_line_name", and "pDNA_batch"
'''
check_inputs(readcounts={'default': readcounts}, sequence_map={'default': sequence_map},
guide_gene_map={'default': guide_gene_map})
print('calculating LFC')
lfc = np.log2(calculate_fold_change(readcounts, sequence_map))
print('finding maximum LFC cells')
ggtemp = guide_gene_map.set_index('sgrna').gene.sort_index()
sqtemp = sequence_map.set_index('sequence_ID').cell_line_name.sort_index()
max_lfc = lfc.groupby(ggtemp, axis=1).max()
potential_cols = max_lfc.columns[max_lfc.max() > absolute_cutoff]
potential_rows= max_lfc.index[max_lfc.max(axis=1) > absolute_cutoff]
max_lfc = max_lfc.loc[potential_rows, potential_cols]
ggtemp = ggtemp[ggtemp.isin(potential_cols)]
sqtemp = sqtemp[sqtemp.isin(potential_rows)]
ggreversed = pd.Series(ggtemp.index.values, index=ggtemp.values).sort_index()
sqreversed = pd.Series(sqtemp.index.values, index=sqtemp.values).sort_index()
def second_highest(x):
if len(x) == 1:
return -np.inf
return x.values[np.argpartition(-x.values, 1)[1]]
max_row_2nd_column = lfc.T.groupby(ggtemp, axis=0).agg(second_highest).T
# print('constructing second of two second-highest matrices')
# max_col_2nd_row = lfc.groupby(ggtemp, axis=1).max()\
# .groupby(sqtemp, axis=0).agg(second_highest)
second_highest = max_row_2nd_column.loc[max_lfc.index, max_lfc.columns].values
# max_col_2nd_row.loc[max_lfc.index, max_lfc.columns].values
# )
gap = pd.DataFrame(max_lfc.values - second_highest, #second_highest
index=max_lfc.index, columns=max_lfc.columns)
print('finding sequences and guides with outgrowth')
cases = max_lfc[(max_lfc > absolute_cutoff) & (gap > gap_cutoff)]
cases = cases.stack()
print('%i (%1.5f%% of) readcounts to be removed' % (
len(cases),
100*len(cases)/np.product(readcounts.shape)
))
print(cases[:10])
problems = pd.Series()
for ind in cases.index:
block = lfc.loc[ind[0], ggreversed.loc[[ind[1]]]]
stacked = block[block == cases.loc[ind]]
guide = stacked.index[0]
problems.loc['%s&%s' % ind] = (ind[0], guide)
print('NaNing bad outgrowths')
for rep, guide in problems.values:
readcounts.loc[rep, guide] = np.nan
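# Worked illustration of the criteria above (invented numbers): a guide/replicate LFC of
# 3.1 that is the maximum for its gene and cell line, above absolute_cutoff = 2, and at
# least gap_cutoff = 2 higher than the next-best value (e.g. 0.9) gets its readcount NaNed.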
##################################################################
# M O D E L #
##################################################################
class Chronos(object):
'''
Model class for inferring effect of gene knockout from readcount data. Takes in readcounts, mapping dataframes, and hyperparameters at init,
then is trained with `train`.
Note on axes:
Replicates and cell lines are always the rows/major axis of all dataframes and tensors. Guides and genes are always the columns/second axis.
In cases where values vary per library, the object is a dict, and the library name is the key.
Notes on attribute names:
Attributes with single preceding underscores are tensorflow constants or tensorflow nodes, in analogy
with the idea of "private" attributes not meant to be interacted with directly. For tensorflow nodes,
there is usually a defined class attribute with no underscore which runs the node and returns
a pandas Series or DataFrame or dict of the same.
In other words `Chronos.v_a` (tensor) --(tensorflow function)-> `Chronos._a` (tensor) --(session run)-> `Chronos.a` (pandas object)
Some intermediate tensorflow nodes do not have corresponding numpy/pandas attributes.
Most parameters with a pandas interface can be set using the pandas interface. Do NOT try set tensorflow tensors directly - there
are usually transformations Chronos expects, such as rescaling time values. Use the pandas interface, i.e.
my_chronos_model.gene_effect = my_pandas_dataframe.
Every set of parameters that are fit per-library are dicts. If `Chronos.v_a` is a dict, the subsequent attributes in the graph are also dicts.
Settable Attributes: these CAN be set manually to interrogate the model or for other advanced uses, but NOT RECOMMENDED. Most users
will just want to read them out after training.
guide_efficacy (`pandas.Series`): estimated on-target KO efficacy of reagents, between 0 and 1
cell_efficacy (`dict` of `pandas.Series`): estimated cell line KO efficacy per library, between 0 and 1
        growth_rate (`dict` of `pandas.Series`): relative growth rate of cell lines, positive float. 1 is the average of all lines in library.
gene_effect ('pandas.DataFrame'): cell line by gene matrix of inferred change in growth rate caused by gene knockout
screen_delay (`pandas.Series`): per gene delay between infection and appearance of growth rate phenotype
        initial_offset (`dict` of 'pandas.Series'): per sgrna estimated log fold pDNA error, per library. This value is exponentiated and mean-centered,
then multiplied by the measured pDNA to infer the actual pDNA RPM of each guide.
If there are fewer than 2 late time points, the mean of this value per gene is 0.
days (`dict` of `pandas.Series`): number of days in culture for each replicate.
learning_rate (`float`): current model learning rate. Will be overwritten when `train` is called.
Unsettable (Calculated) Attributes:
cost (`float`): the NB2 negative log-likelihood of the data under the current model, shifted to be 0 when the output RPM
perfectly matches the input RPM. Does not include regularization or terms involving only constants.
cost_presum (`dict` of `pd.DataFrame`): the per-library, per-replicate, per-guide contribution to the cost.
out (`dict` of `pd.DataFrame`): the per-library, per-replicate, per-guide model estimate of reads, unnormalized.
output_norm (`dict` of `pandas.DataFrame`): `out` normalized so the sum of reads for each replicate is 1.
efficacy (`pandas.DataFrame`): cell by guide efficacy matrix generated from the outer product of cell and guide efficacies
initial (`dict` of `pandas.DataFrame`): estimated initial abundance of guides
rpm (`dict` of `pandas.DataFrame`): the RPM of the measured readcounts / 1 million. Effectively a constant.
'''
default_timepoint_scale = .1 * np.log(2)
default_cost_value = 0.67
persistent_handles = set([])
def __init__(self,
readcounts,
#copy_number_matrix,
guide_gene_map,
sequence_map,
gene_effect_hierarchical=.1,
gene_effect_smoothing=.25,
kernel_width=5,
gene_effect_L1=0.1,
gene_effect_L2=0,
excess_variance=0.05,
guide_efficacy_reg=.5,
offset_reg=1,
growth_rate_reg=0.01,
smart_init=True,
cell_efficacy_guide_quantile=0.01,
initial_screen_delay=3,
scale_cost=0.67,
max_learning_rate=.02,
dtype=tf.double,
verify_integrity=True,
log_dir=None,
):
'''
Parameters:
readcounts (`dict` of `pandas.DataFrame`): Values are matrices with sequenced entities on rows,
guides as column headers, and total readcounts for the guide in the replicate as entries. There should be at least one key
for each library, but the user can also make separate individual datasets according to some other condition,
such as screening site.
sequence_map (`dict` of `pandas.DataFrame`): Keys must match the keys of readcounts. Values are tables with the columns:
sequence_ID: matches a row index in the corresponding readcounts matrix. Should uniquely identify a combination of
cell line, replicate, and sequence passage.
cell_line: name of corresponding cell line. 'pDNA' if this is a plasmid DNA or initial count measurement.
days: estimate number of cell days from infection when readcounts were performed. Plasmid DNA entries should be 0.
pDNA_batch: Unique identifier for associating readcounts to time 0 readcounts.
guide_gene_map (`dict` of `pandas.DataFrame`): Values are tables with the columns:
sgrna: guide sequence or unique guide identifier
gene: gene mapped to by guide. Genes should follow consistent naming conventions between libraries
gene_effect_hierarchical (`float`): regularization of individual gene effect scores towards the mean across cell lines
gene_effect_smoothing (`float`): regularization of individual gene scores towards mean after Gaussian kernel convolution
kernel_width (`float`): width (SD) of the Gaussian kernel for the smoothing regularization
gene_effect_L1 (`float`): regularization of gene effect CELL LINE MEAN towards zero with L1 penalty
gene_effect_L2 (`float`): regularization of individual gene scores towards zero with L2 penalty
offset_reg (`float`): regularization of pDNA error
growth_rate_reg (`float`): regularization of the negative log of the relative growth rate
guide_efficacy_reg (`float`): regularization of the gap between the two strongest guides' efficacy per gene,
or of the gap between them and 1 if only one late timepoint is present in readcounts for that library
excess_variance (`float` or `dict`): measure of Negative Binomial overdispersion for the cost function,
overall or per cell line and library.
max_learning_rate (`float`): passed to AdamOptimizer after initial burn-in period during training
verify_integrity (`bool`): whether to check each itnermediate tensor computed by Chronos for innappropriate values
log_dir (`path` or None): if provided, location where Tensorboard snapshots will be saved
cell_efficacy_init (`bool`): whether to initialize cell efficacies using the fold change of the most depleted guides
at the last timepoint
            cell_efficacy_guide_quantile (`float`): quantile of guides to use to estimate cell screen efficacy. Between 0 and 0.5.
initial_screen_delay (`float`): how long after infection before growth phenotype kicks in, in days. If there are fewer than
3 late timepoints this initial value will be left unchanged.
dtype (`tensorflow.double` or `tensorflow.float`): numerical precision of the computation. Strongly recommend to leave this unchanged.
scale_cost (`bool`): The likelihood cost will be scaled to always be initially this value (default 0.67) for all data.
This encourages more consistent behavior across datasets when leaving the other regularization hyperparameters
constant. Pass 0, False, or None to avoid cost scaling.
'''
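        # Illustrative usage (added; variable names are hypothetical): Chronos is normally
        # constructed with per-library dicts sharing the same keys, e.g.
        #   model = Chronos(readcounts={'libA': readcounts_df},
        #                   guide_gene_map={'libA': guide_map_df},
        #                   sequence_map={'libA': sequence_map_df})
        # and then fit with the `train` method mentioned in the class docstring.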
########################### I N I T I A L C H E C K S ############################
check_inputs(readcounts=readcounts, sequence_map=sequence_map, guide_gene_map=guide_gene_map)
sequence_map = self._make_pdna_unique(sequence_map, readcounts)
excess_variance = self._check_excess_variance(excess_variance, readcounts, sequence_map)
self.np_dtype = {tf.double: np.float64, tf.float32: np.float32}[dtype]
self.keys = list(readcounts.keys())
if scale_cost:
try:
scale_cost = float(scale_cost)
assert 0 < scale_cost, "scale_cost must be positive"
except:
raise ValueError("scale_cost must be None, False, or a semi-positive number")
#################### C R E A T E M A P P I N G S ########################
(self.guides, self.genes, self.all_guides, self.all_genes,
self.guide_map, self.column_map
) = self._get_column_attributes(readcounts, guide_gene_map)
(self.sequences, self.pDNA_unique, self.cells, self.all_sequences, \
self.all_cells, self.cell_indices, self.replicate_map, self.index_map,
self.line_index_map, self.batch_map
) = self._get_row_attributes(readcounts, sequence_map)
################## A S S I G N C O N S T A N T S #######################
print('\n\nassigning float constants')
self.guide_efficacy_reg = float(guide_efficacy_reg)
self.gene_effect_L1 = float(gene_effect_L1)
self.gene_effect_L2 = float(gene_effect_L2)
self.gene_effect_hierarchical = float(gene_effect_hierarchical)
self.growth_rate_reg = float(growth_rate_reg)
self.offset_reg = float(offset_reg)
self.gene_effect_smoothing = float(gene_effect_smoothing)
self.kernel_width = float(kernel_width)
self.cell_efficacy_guide_quantile = float(cell_efficacy_guide_quantile)
if not 0 < self.cell_efficacy_guide_quantile < .5:
raise ValueError("cell_efficacy_guide_quantile should be greater than 0 and less than 0.5")
self.nguides, self.ngenes, self.nlines, self.nsequences = (
len(self.all_guides), len(self.all_genes), len(self.all_cells), len(self.all_sequences)
)
self._excess_variance = self._get_excess_variance_tf(excess_variance)
self.median_timepoint_counts = self._summarize_timepoint(sequence_map, np.median)
self._initialize_graph(max_learning_rate, dtype)
self._gene_effect_mask, self.mask_count = self._get_gene_effect_mask(dtype)
self._days = self._get_days(sequence_map, dtype)
self._rpm, self._mask = self._get_late_tf_timepoints(readcounts, dtype)
self._measured_initial = self._get_tf_measured_initial(readcounts, sequence_map, dtype)
################## C R E A T E V A R I A B L E S #######################
print('\n\nBuilding variables')
(self.v_initial, self._initial_core,
self._initial, self._initial_offset, self._grouped_initial_offset) = self._get_initial_tf_variables(dtype)
(self.v_guide_efficacy, self._guide_efficacy) = self._get_tf_guide_efficacy(dtype)
(self.v_growth_rate, self._growth_rate, self._line_presence_boolean) = self._get_tf_growth_rate(dtype)
(self.v_cell_efficacy, self._cell_efficacy) = self._get_tf_cell_efficacy(dtype)
(self.v_screen_delay, self._screen_delay) = self._get_tf_screen_delay(initial_screen_delay, dtype)
(self.v_mean_effect, self.v_residue, self._residue, self._true_residue, self._combined_gene_effect
) = self._get_tf_gene_effect(dtype)
############################# C O R E M O D E L ##############################
print("\n\nConnecting graph nodes in model")
self._effective_days = self._get_effect_days(self._screen_delay, self._days)
self._gene_effect_growth = self._get_gene_effect_growth(self._combined_gene_effect, self._growth_rate)
self._efficacy, self._selected_efficacies = self._get_combined_efficacy(self._cell_efficacy,self. _guide_efficacy)
self._growth, self._change = self._get_growth_and_fold_change(self._gene_effect_growth, self._effective_days,
self._selected_efficacies)
self._out, self._output_norm = self._get_abundance_estimates(self._initial, self._change)
##################################### C O S T #########################################
print("\n\nBuilding all costs")
self._total_guide_reg_cost = self._get_guide_regularization(self._guide_efficacy, dtype)
self._smoothed_presum = self._get_smoothed_ge_regularization(self.v_mean_effect, self._true_residue, kernel_width, dtype)
self._initial_cost = self._get_initial_regularization(self._initial_offset)
self._cost_presum, self._cost, self._scale = self._get_nb2_cost(self._excess_variance, self._output_norm, self._rpm, self._mask,
dtype)
self.run_dict.update({self._scale: 1.0})
self._full_cost = self._get_full_cost(dtype)
######################### F I N A L I Z I N G ###################################
print('\nCreating optimizer')
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self._learning_rate)
default_var_list = [
self.v_mean_effect,
self.v_residue,
self.v_guide_efficacy,
self.v_initial,
self.v_growth_rate
]
# if all([val > 2 for val in self.median_timepoint_counts.values()]):
# "All libraries have sufficient timepoints to estimate screen_delay, adding to estimate"
# default_var_list.append(self.v_screen_delay)
self._ge_only_step = self.optimizer.minimize(self._full_cost, var_list=[self.v_mean_effect, self.v_residue])
self._step = self.optimizer.minimize(self._full_cost, var_list=default_var_list)
self._merged = tf.compat.v1.summary.merge_all()
if log_dir is not None:
print("\tcreating log at %s" %log_dir)
if os.path.isdir(log_dir):
shutil.rmtree(log_dir)
os.mkdir(log_dir)
self.log_dir = log_dir
self.writer = tf.compat.v1.summary.FileWriter(log_dir, self.sess.graph)
init_op = tf.compat.v1.global_variables_initializer()
print('initializing variables')
self.sess.run(init_op)
if scale_cost:
denom = self.cost
self.run_dict.update({self._scale: scale_cost/denom})
if smart_init:
print("estimating initial screen efficacy")
self.smart_initialize(readcounts, sequence_map, cell_efficacy_guide_quantile)
if verify_integrity:
print("\tverifying graph integrity")
self.nan_check()
self.epoch = 0
print('ready to train')
################################################################################################
############## I N I T I A L I Z A T I O N M E T H O D S #########################
################################################################################################
def get_persistent_input(self, dtype, data, name=''):
placeholder = tf.compat.v1.placeholder(dtype=dtype, shape=data.shape)
# Persistent tensor to hold the data in tensorflow. Helpful because TF doesn't allow
# graph definitions larger than 2GB (so can't use constants), and passing the feed dict each time is slow.
# This feature is poorly documented, but the handle seems to refer not to a tensor but rather a tensor "state" -
# the state of a placeholder that's been passed the feed dict. This is what persists. Annoyingly, it then becomes
# impossible to track the shape of the tensor.
state_handle = self.sess.run(tf.compat.v1.get_session_handle(placeholder), {placeholder: data})
# why TF's persistence requires two handles, I don't know. But it does.
tensor_handle, data = tf.compat.v1.get_session_tensor(state_handle.handle, dtype=dtype, name=name)
self.run_dict[tensor_handle] = state_handle.handle
self.persistent_handles.add(state_handle.handle)
return data
########################### I N I T I A L C H E C K S ############################
def _make_pdna_unique(self, sequence_map, readcounts):
#guarantee unique pDNA batches
sequence_map = {key: val.query('sequence_ID in %r' % list(readcounts[key].index)) for key, val in sequence_map.items()}
for key, val in sequence_map.items():
val['pDNA_batch'] = val['pDNA_batch'].apply(lambda s: '%s_%s' % (key, s))
return sequence_map
def _check_excess_variance(self, excess_variance, readcounts, sequence_map):
if not isinstance(excess_variance, dict):
try:
excess_variance = float(excess_variance)
except ValueError:
raise ValueError("if provided, excess_variance must be a dict of pd.Series per library or a float")
else:
for key, val in excess_variance.items():
assert key in readcounts, "excess_variance key %s not found in the rest of the data" % key
assert isinstance(val, pd.Series), "the excess_variance values provided for the different datasets must be pandas.Series objects, not\n%r" % val
diff = set(val.index) ^ set(sequence_map[key].cell_line_name)
assert len(diff) < 2, "difference between index values\n%r\nfor excess_variance and cell lines found in %s" % (diff, key)
return excess_variance
#################### C R E A T E M A P P I N G S ########################
def make_map(melted_map, outer_list, inner_list, dtype=np.float64):
'''
takes a sorted list of indices, targets, and a pd.Series that maps between them and recomputes the mapping between them
as two arrays of integer indices suitable for gather function calls.
The mapping can only include a subset of either the outer or inner list and vice versa.
The mapping's indices must be unique.
'''
melted_map = melted_map[melted_map.index.isin(outer_list) & melted_map.isin(inner_list)]
outer_array = np.array(outer_list)
gather_outer = np.searchsorted(outer_array, melted_map.index).astype(np.int)
inner_array = np.array(inner_list)
gather_inner = np.searchsorted(inner_array, melted_map.values).astype(np.int)
args = {
'gather_ind_inner': gather_inner,
'gather_ind_outer': gather_outer}
return args
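    # Illustrative example (added): with outer_list = ['s1', 's2', 's3'],
    # inner_list = ['geneA', 'geneB'] and a melted_map of s1 -> geneA, s3 -> geneB,
    # Chronos.make_map returns {'gather_ind_inner': [0, 1], 'gather_ind_outer': [0, 2]},
    # i.e. integer positions suitable for gather ops on tensors ordered like the lists.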
def _get_column_attributes(self, readcounts, guide_gene_map):
print('\n\nFinding all unique guides and genes')
#guarantees the same sequence of guides and genes within each library
guides = {key: val.columns for key, val in readcounts.items()}
genes = {key: val.set_index('sgrna').loc[guides[key], 'gene'] for key, val in guide_gene_map.items()}
all_guides = sorted(set.union(*[set(v) for v in guides.values()]))
all_genes = sorted(set.union(*[set(v.values) for v in genes.values()]))
for key in self.keys:
print("found %i unique guides and %i unique genes in %s" %(
len(set(guides[key])), len(set(genes[key])), key
))
print("found %i unique guides and %i unique genes overall" %(len(all_guides), len(all_genes)))
print('\nfinding guide-gene mapping indices')
guide_map = {key:
Chronos.make_map(guide_gene_map[key][['sgrna', 'gene']].set_index('sgrna').iloc[:, 0],
all_guides, all_genes, self.np_dtype)
for key in self.keys}
column_map = {key: np.array(all_guides)[guide_map[key]['gather_ind_outer']]
for key in self.keys}
return guides, genes, all_guides, all_genes, guide_map, column_map
def _get_row_attributes(self, readcounts, sequence_map):
print('\nfinding all unique sequenced replicates, cell lines, and pDNA batches')
#guarantees the same sequence of sequence_IDs and cell lines within each library.
sequences = {key: val[val.cell_line_name != 'pDNA'].sequence_ID for key, val in sequence_map.items()}
pDNA_batches = {key: list(val[val.cell_line_name != 'pDNA'].pDNA_batch.values)
for key, val in sequence_map.items()}
pDNA_unique = {key: sorted(set(val)) for key, val in pDNA_batches.items()}
cells = {key: val[val.cell_line_name != 'pDNA']['cell_line_name'].unique() for key, val in sequence_map.items()}
all_sequences = sorted(set.union(*tuple([set(v.values) for v in sequences.values()])))
all_cells = sorted(set.union(*tuple([set(v) for v in cells.values()])))
        #This is necessary to consume copy number provided only for the cell-guide blocks present in each library
cell_indices = {key: [all_cells.index(s) for s in v]
for key, v in cells.items()}
assert len(all_sequences) == sum([len(val) for val in sequences.values()]
), "sequence IDs must be unique among all datasets"
for key in self.keys:
print("found %i unique sequences (excluding pDNA) and %i unique cell lines in %s" %(
len(set(sequences[key])), len(set(cells[key])), key
))
print("found %i unique replicates and %i unique cell lines overall" %(len(all_sequences), len(all_cells)))
print('\nfinding replicate-cell line mappings indices')
replicate_map = {key:
Chronos.make_map(sequence_map[key][['sequence_ID', 'cell_line_name']].set_index('sequence_ID').iloc[:, 0],
all_sequences, all_cells, self.np_dtype)
for key in self.keys}
index_map = {key: np.array(all_sequences)[replicate_map[key]['gather_ind_outer']]
for key in self.keys}
line_index_map = {key: np.array(all_cells)[replicate_map[key]['gather_ind_inner']]
for key in self.keys}
print('\nfinding replicate-pDNA mappings indices')
batch_map = {key:
Chronos.make_map(sequence_map[key][['sequence_ID', 'pDNA_batch']].set_index('sequence_ID').iloc[:, 0],
all_sequences, pDNA_unique[key], self.np_dtype)
for key in self.keys}
return sequences, pDNA_unique, cells, all_sequences, all_cells, cell_indices, replicate_map, index_map, line_index_map, batch_map
################## A S S I G N C O N S T A N T S #######################
def _get_excess_variance_tf(self, excess_variance):
_excess_variance = {}
for key in self.keys:
try:
_excess_variance[key] = tf.constant(excess_variance[key][self.line_index_map[key]].values.reshape((-1, 1)))
except IndexError:
raise IndexError("difference between index values for excess_variance and cell lines found in %s" % key)
except TypeError:
_excess_variance[key] = tf.constant(excess_variance * np.ones(shape=(len(self.line_index_map[key]), 1)))
return _excess_variance
def _summarize_timepoint(self, sequence_map, func):
out = {}
for key, val in sequence_map.items():
out[key] = func(val.groupby("cell_line_name").days.agg(lambda v: len(v.unique())).drop('pDNA').values)
return out
def _initialize_graph(self, max_learning_rate, dtype):
print('initializing graph')
self.sess = tf.compat.v1.Session()
self._learning_rate = tf.compat.v1.placeholder(shape=tuple(), dtype=dtype)
self.run_dict = {self._learning_rate: max_learning_rate}
self.max_learning_rate = max_learning_rate
self.persistent_handles = set([])
def _get_gene_effect_mask(self, dtype):
# excludes genes in a cell line with reads from only one library
print('\nbuilding gene effect mask')
if len(self.keys) == 1: #only one library, therefore no mask
_gene_effect_mask = tf.constant(1, shape=(len(self.all_cells), len(self.all_genes)), dtype=dtype)
mask_count = len(self.all_cells) * len(self.all_genes)
print("built mask with no exclusions")
return _gene_effect_mask, mask_count
else:
print(self.keys)
mask = {}#pd.DataFrame(0, index=self.all_cells, columns=self.all_genes, dtype=np.bool)
for cell in self.all_cells:
libraries = [key for key in self.keys if cell in self.cells[key]]
covered_genes = sorted(set.intersection(*[set(self.genes[key]) for key in libraries]))
mask[cell] = pd.Series(1, index=covered_genes, dtype=self.np_dtype)
mask = pd.DataFrame(mask).T.reindex(index=self.all_cells, columns=self.all_genes).fillna(0)
_gene_effect_mask = tf.constant(mask.values, dtype=dtype)
mask_count = (mask == 1).sum().sum()
print('made gene_effect mask, excluded %i (%1.5f) values' % ((mask == 0).sum().sum(), (mask == 0).mean().mean()))
return _gene_effect_mask, mask_count
def _get_days(self, sequence_map, dtype):
print('\nbuilding doubling vectors')
_days = {key:
tf.constant(Chronos.default_timepoint_scale * val.set_index('sequence_ID').loc[self.index_map[key]].days.astype(self.np_dtype).values,
dtype=dtype, shape=(len(self.index_map[key]), 1), name="days_%s" % key)
for key, val in sequence_map.items()}
for key in self.keys:
print("made days vector of shape %r for %s" %(
_days[key].get_shape().as_list(), key))
return _days
def _get_late_tf_timepoints(self, readcounts, dtype):
print("\nbuilding late observed timepoints")
_rpm = {}
_mask = {}
for key in self.keys:
rpm_np = readcounts[key].loc[self.index_map[key], self.column_map[key]].copy()
rpm_np = 1e6 * (rpm_np.values + 1e-32) / (rpm_np.fillna(0).values + 1e-32).sum(axis=1).reshape((-1, 1))
mask = pd.notnull(rpm_np)
_mask[key] = tf.constant(mask, dtype=tf.bool, name='NaN_mask_%s' % key)
rpm_np[~mask] = 0
_rpm[key] = self.get_persistent_input(dtype, rpm_np, name='rpm_%s' % key)
print("\tbuilt normalized timepoints for %s with shape %r (replicates X guides)" %(
key, rpm_np.shape))
return _rpm, _mask
def _get_tf_measured_initial(self, readcounts, sequence_map, dtype):
print('\nbuilding initial reads')
_measured_initial = {}
for key in self.keys:
rc = readcounts[key]
sm = sequence_map[key]
sm = sm[sm.cell_line_name == 'pDNA']
batch = rc.loc[sm.sequence_ID]
if batch.empty:
raise ValueError("No sequenced entities are labeled 'pDNA', or there are no readcounts for those that are")
if batch.shape[0] > 1:
batch = batch.groupby(sm.pDNA_batch.values).sum().astype(self.np_dtype)
else:
batch = | pd.DataFrame({self.pDNA_unique[key][0]: batch.iloc[0]}) | pandas.DataFrame |
'''
Run this to get html files
This file contains code to obtain html data from oslo bors and yahoo finance
'''
import argparse
import re
import threading
import time
from pprint import pprint
from typing import List
import sys
import pathlib
import os
import numpy as np
import pandas as pd
import pypatconsole as ppc
from bs4 import BeautifulSoup as bs
from pandas import DataFrame, to_numeric
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from tqdm import tqdm
import config as cng
import yfinance_hotfix as yf
import utils
def dump_assert(file: str):
assert file is not None, 'File parameter must be specified when dump=True'
def get_osebx_htmlfile(url: str, timeout: int=cng.DEFAULT_TIMEOUT, wait_target_class: str=None,
verbose: int=1, dump: bool=True, file: str=None) -> str:
'''Load OSEBX html files using selenium'''
if verbose >= 1: print(f'Gathering data from {url}')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
driver = webdriver.Chrome(options=chrome_options)
if verbose >= 2: print('Initialized chromedriver')
driver.get(url)
if verbose >= 2: print('Waiting for target HTML class to appear')
# If the webpage dynamically loads the table with the stock information. This code will force the webdriver
# wait until the wanted element is loaded.
if not wait_target_class is None:
try:
WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((By.CLASS_NAME, wait_target_class))
)
except:
print(f'Timeout: Could not load class {wait_target_class} from {url}')
driver.quit()
exit()
if verbose >= 2: print('Element located')
page_src = driver.page_source
driver.quit()
if dump:
if verbose >= 1: print(f'Dumping HTML file: {file}')
dump_assert(file)
with open(file, 'w+') as file:
file.write(page_src)
return page_src
def get_osebx_htmlfiles():
'''Get OSEBX HTML files'''
get_osebx_htmlfile(url=cng.BORS_QUOTES_URL,
wait_target_class=cng.QUOTES_WAIT_TARGET_CLASS,
dump=True,
file=cng.QUOTES_HTML_DATE_FILE,
verbose=2)
get_osebx_htmlfile(url=cng.BORS_RETURNS_URL,
wait_target_class=cng.RETURNS_WAIT_TARGET_CLASS,
dump=True,
file=cng.RETURNS_HTML_DATE_FILE,
verbose=2)
def scrape_osebx_html(quotes: str=None, returns: str=None, verbose: int=0, dump: bool=True,
file: str=None) -> pd.DataFrame:
'''
Scrape stocks from oslo bors HTML files.
HTML of websites of quotes and returns
should be located in same folder this file.
quotes: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/quotelist/ob/all/all/false
returns: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/return/ob/all/all/false
'''
if quotes is None:
quotes = cng.QUOTES_HTML_FILE
if returns is None:
returns = cng.RETURNS_HTML_FILE
with open(quotes) as html_source:
soup_quotes = bs(html_source, 'html.parser')
with open(returns) as html_source:
soup_return = bs(html_source, 'html.parser')
# Filter out the stock tables
html_quotes = soup_quotes.find('div', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_all('tr')
html_return = soup_return.find('div', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_all('tr')
tickers = []
names = []
lasts = []
buys = []
sells = []
tradecounts = []
marketcaps = []
sectors = []
infos = []
profits_today = []
profits_1wk = []
profits_1month = []
profits_ytd = []
profits_1yr = []
# Create lists with features. Only preprocessing for strings are done (values are all strings).
# Further preprocessing will be done later when the values are in a pandas DataFrame.
for quotesrow, returnrow in tqdm(zip(html_quotes, html_return), total=len(html_quotes), disable=verbose):
# Scrape ticker, name, marketcap, sector and info.
tickers.append(quotesrow.a.text)
names.append(quotesrow.find('td', {'data-header':'Navn'}).text)
lasts.append(quotesrow.find('td', {'data-header':'Last'}).text.replace(',', ''))
buys.append(quotesrow.find('td', {'data-header':'Buy'}).text.replace(',', ''))
sells.append(quotesrow.find('td', {'data-header':'Sell'}).text.replace(',', ''))
tradecounts.append(quotesrow.find('td', {'data-header':'No. of trades'}).text.replace(',', ''))
marketcaps.append(quotesrow.find('td', {'data-header':'Market cap (MNOK)'}).text.replace(',', ''))
# Marketcap unit is in millions, multiply by 10e6 to get normal values
sectors.append(quotesrow.find('td', class_='icons').get('title'))
        # Info is whether the instrument is a Liquidity provider or not
infos.append('LP' if 'fa-bolt' in quotesrow.find('td', class_='infoIcon').i.get('class') else np.nan)
# Scrape return values
# Values are percentages, and are currently in text form. Divide by 100 to get normal values
profits_today.append(returnrow.find('td', class_='CHANGE_PCT_SLACK').text.replace('%', ''))
profits_1wk.append(returnrow.find('td', class_='CHANGE_1WEEK_PCT_SLACK').text.replace('%', ''))
profits_1month.append(returnrow.find('td', class_='CHANGE_1MONTH_PCT_SLACK').text.replace('%', ''))
profits_ytd.append(returnrow.find('td', class_='CHANGE_YEAR_PCT_SLACK').text.replace('%', ''))
profits_1yr.append(returnrow.find('td', class_='CHANGE_1YEAR_PCT_SLACK').text.replace('%', ''))
if verbose >= 1:
print(f'Ticker: {tickers[-1]}')
print(f'Name: {names[-1]}')
print(f'Last: {lasts[-1]}')
print(f'Buy: {buys[-1]}')
print(f'Sell: {sells[-1]}')
print(f'Cap: {marketcaps[-1]}')
print(f'Sector: {sectors[-1]}')
print(f'Info: {infos[-1]}')
print(f'Profit today: {profits_today[-1]}')
print(f'Profit 1 week: {profits_1wk[-1]}')
print(f'Profit 1 month: {profits_1month[-1]}')
print(f'Profit YTD: {profits_ytd[-1]}')
print(f'Profit 1 year: {profits_1yr[-1]}')
print()
df = DataFrame(dict(
ticker=tickers,
name=names,
sector=sectors,
last_=lasts, # DataFrame.last is a method, hence the underscore
buy=buys,
sell=sells,
tradecount=tradecounts,
info=infos,
marketcap=marketcaps,
profit_today=profits_today,
profit_1wk=profits_1wk,
profit_1month=profits_1month,
profit_ytd=profits_ytd,
profit_1yr=profits_1yr
))
# Turn returns to floats then divide by 100 to convert from percentages to "numbers"
columns_to_num = ['profit_today', 'profit_1wk', 'profit_1month', 'profit_ytd', 'profit_1yr']
df[columns_to_num] = df[columns_to_num].apply(to_numeric, errors='coerce') / 100
# Turn other things to numeric as well
# coerce turns missing or invalid values to nan
df.last_ = | to_numeric(df.last_, errors='coerce') | pandas.to_numeric |
#!/usr/bin/python
from threading import Thread
from threading import Lock
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
import json
from urllib import parse
import pandas as pd
import csv
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import numpy
import random
import traceback
from keras.models import load_model
from sklearn.externals import joblib
PORT_NUMBER = 8080
lock = Lock()
models = {}
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag + 1)]
columns.append(df)
df = concat(columns, axis=1)
df = df.drop(0)
return df
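# Illustrative example (added): timeseries_to_supervised([10, 20, 30, 40], lag=1)
# yields rows pairing each value with its predecessor -- (10, 20), (20, 30), (30, 40) --
# after the first row (which has no lagged value) is dropped.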
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# inverse scaling for a forecasted value
def invert_scale(scaler, X, yhat):
new_row = [x for x in X] + [yhat]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size2, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size2, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size2, verbose=0, shuffle=False)
# loss = model.evaluate(X, y)
# print("Epoch {}/{}, loss = {}".format(i, nb_epoch, loss))
print("Epoch {}/{}".format(i, nb_epoch))
model.reset_states()
return model
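# Shape note (added for clarity): with lag = 4 the supervised rows have 5 columns;
# fit_lstm takes the first 4 as X, reshaped to (samples, 1 timestep, 4 features),
# and the last column as y, training a stateful LSTM with the fixed batch size.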
def train_models(job):
lock.acquire()
if job not in models:
models[job] = {
'lock': Lock()
}
lock.release()
models[job]['lock'].acquire()
# load dataset
series = read_csv('./data/' + job + '.csv', header=0, index_col=0, squeeze=True)
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
lag = 4
supervised = timeseries_to_supervised(diff_values, lag)
supervised_values = supervised.values
batch_size = 32
if supervised_values.shape[0] < 100:
batch_size = 16
if supervised_values.shape[0] < 60:
batch_size = 8
# split data into train and test-sets
train = supervised_values
# transform the scale of the data
# scale data to [-1, 1]
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# fit the model
t1 = train.shape[0] % batch_size
train_trimmed = train_scaled[t1:, :]
model = fit_lstm(train_trimmed, batch_size, 30, 4)
model.save('./data/checkpoint-' + job)
scaler_filename = './data/checkpoint-' + job + "-scaler.save"
joblib.dump(scaler, scaler_filename)
models[job]['batch_size'] = batch_size
models[job]['lock'].release()
def predict(job, seq):
if job not in models or 'batch_size' not in models[job]:
return -1, False
batch_size = int(models[job]['batch_size'])
data = {
'seq': seq,
'value': 0,
}
model = load_model('./data/checkpoint-' + job)
scaler_filename = './data/checkpoint-' + job + "-scaler.save"
scaler = joblib.load(scaler_filename)
file = './data/' + job + '.' + str(random.randint(1000, 9999)) + '.csv'
df = | pd.read_csv('./data/' + job + '.csv', usecols=['seq', 'value']) | pandas.read_csv |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
| pd.period_range("2012Q1", periods=3, freq="Q") | pandas.period_range |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from kneed import KneeLocator
from jupyter_utils import AllDataset
data_dir = '../drp-data/'
GDSC_GENE_EXPRESSION = 'preprocessed/gdsc_tcga/gdsc_rma_gene_expr.csv'
TCGA_GENE_EXPRESSION = 'preprocessed/gdsc_tcga/tcga_log2_gene_expr.csv'
TCGA_CANCER = 'preprocessed/cancer_type/TCGA_cancer_one_hot.csv'
GDSC_CANCER = 'preprocessed/cancer_type/GDSC_cancer_one_hot.csv'
GDSC_lnIC50 = 'preprocessed/drug_response/gdsc_lnic50.csv'
TCGA_DR = 'preprocessed/drug_response/tcga_drug_response.csv'
gdsc_dr = pd.read_csv(data_dir + GDSC_lnIC50, index_col=0)
tcga_dr = pd.read_csv(data_dir + TCGA_DR, index_col=0)
gdsc_cancer = pd.read_csv(data_dir + GDSC_CANCER, index_col=0)
tcga_cancer = pd.read_csv(data_dir + TCGA_CANCER, index_col=0)
# dataset = AllDataset(data_dir, GDSC_GENE_EXPRESSION, TCGA_GENE_EXPRESSION,
# GDSC_lnIC50, TCGA_DR, TCGA_TISSUE)
# cancer_typetcga =
drugs = [
'bleomycin',
'cisplatin',
'cyclophosphamide',
'docetaxel',
'doxorubicin',
'etoposide',
'gemcitabine',
'irinotecan',
'oxaliplatin',
'paclitaxel',
'pemetrexed',
'tamoxifen',
'temozolomide',
'vinorelbine']
writer_a = pd.ExcelWriter('gene_finding/sample_counts.xlsx', engine='xlsxwriter')
gdsc_df = | pd.DataFrame(columns=drugs) | pandas.DataFrame |
import math
import numpy as np
import matplotlib.pyplot as plt
import argparse
import logging
import sys
import pandas as pd
from scipy import integrate
def parse_args():
parser = argparse.ArgumentParser("Compute precession of orbits")
parser.add_argument('--config', type=str, default="config.yml",
help="yaml config file")
parser.add_argument('--planet', type=str, default="jupiter.yml",
help="planet yml file")
return parser.parse_args()
def initial_values(config, planet, mercury):
# initial values are arranged as:
# [r_p(0), theta_p(0), v_p(0), omega_p(0), r_m(0), theta_m(0), v_m(0), omega_m(0)]
# note that v = dr_dt and omega = dtheta_dt
# place the planet and mercury with phase difference of pi
if (config.onlyMercury):
return [0., 0., 0., 0., mercury.RMin, 0., 0., (mercury.vMax / mercury.RMin)]
else:
return [planet.a, math.pi, 0., (planet.L / planet.a**2), mercury.RMin, 0., 0., (mercury.vMax / mercury.RMin)]
def compute_rhs(t, y, config, planet, mercury):
# incoming values are arranged as:
# [r_p(t-), theta_p(t-), v_p(t-), omega_p(t-), r_m(t-), theta_m(t-), v_m(t-), omega_m(t-)]
# note that v = dr_dt and omega = dtheta_dt
[r_p, theta_p, v_p, omega_p, r_m, theta_m, v_m, omega_m] = y
theta_mp = theta_m - theta_p
cos_theta_mp = math.cos(theta_mp)
sin_theta_mp = math.sin(theta_mp)
r_mp = math.sqrt(r_m**2 + r_p**2 - 2 * r_m * r_p * cos_theta_mp)
r_mp3 = r_mp**3
alpha_p = planet.GM / r_mp3
alpha_m = mercury.GM / r_mp3
d_rp_dt = v_p
d_thetap_dt = omega_p
d_vp_dt = (r_p * omega_p**2) - (planet.GMS / r_p**2) - \
(alpha_m * (r_p - r_m * cos_theta_mp))
d_omegap_dt = ((-2. * v_p * omega_p) / r_p) - \
(alpha_m * (r_m / r_p) * sin_theta_mp)
d_rm_dt = v_m
d_thetam_dt = omega_m
d_vm_dt = (r_m * omega_m**2) - (mercury.GMS / r_m**2) - \
(alpha_p * (r_m - r_p * cos_theta_mp))
d_omegam_dt = ((-2. * v_m * omega_m) / r_m) + \
(alpha_p * (r_p / r_m) * sin_theta_mp)
# return values are arranged as:
# [d_rp_dt(t), d_thetap_dt(t), d_vp_dt(t), d_omegap_dt(t), d_rm_dt(t), d_thetam_dt(t), d_vm_dt(t), d_omegam_dt(t)]
return [d_rp_dt, d_thetap_dt, d_vp_dt, d_omegap_dt, d_rm_dt, d_thetam_dt, d_vm_dt, d_omegam_dt]
def solve(config, planet, mercury):
y = initial_values(config, planet, mercury)
solution = integrate.solve_ivp(fun=compute_rhs, max_step=config.max_step, t_span=(
0., config.tEnd), y0=y, atol=config.targetTolerance, args=(config, planet, mercury))
df = | pd.DataFrame(solution.y) | pandas.DataFrame |
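# One way to unpack the solver output above into named columns, assuming the
# state ordering documented in compute_rhs(); the column names are illustrative:
#   df = pd.DataFrame(solution.y.T,
#                     columns=["r_p", "theta_p", "v_p", "omega_p",
#                              "r_m", "theta_m", "v_m", "omega_m"])
#   df["t"] = solution.t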
__author__ = "<NAME>"
import os
import re
import gzip
import logging
import pandas
import csv
from .Exceptions import ReportableException
def folder_contents(folder, pattern=None):
regexp = re.compile(pattern) if pattern else None
    p = os.listdir(folder)
if regexp: p = [x for x in p if regexp.search(x)]
p = [os.path.join(folder,x) for x in p]
return p
def ensure_requisite_folders(path):
folder = os.path.split(path)[0]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
def maybe_create_folder(path):
if not os.path.exists(path):
os.makedirs(path)
########################################################################################################################
def file_logic(folder, pattern):
r = re.compile(pattern)
f = sorted([x for x in os.listdir(folder) if r.search(x)])
p_ = [r.search(x).group(1) for x in f]
p = [os.path.join(folder,x) for x in f]
return pandas.DataFrame({"name":p_, "path":p, "file":f})
def file_logic_2(folder, pattern, sub_fields, filter=None):
r = re.compile(pattern)
f = os.listdir(folder)
if filter is not None:
filter = re.compile(filter)
f = [x for x in f if filter.search(x)]
f = sorted([x for x in f if r.search(x)])
r, subfield_names, subfield_positions = name_parse_prepare(pattern, sub_fields)
values=[]
for x in f:
values.append((x, os.path.join(folder, x))+name_parse(x, r, subfield_positions))
columns = ["file", "path"] + subfield_names
values = pandas.DataFrame(values, columns=columns)
return values
########################################################################################################################
def name_parse_prepare(name_subfield_regexp, name_subfield):
if name_subfield and name_subfield_regexp:
r = re.compile(name_subfield_regexp)
subfield_names = [x[0] for x in name_subfield]
subfield_positions = [int(x[1]) for x in name_subfield]
else:
r = None
subfield_names = None
subfield_positions = None
return r, subfield_names, subfield_positions
def name_parse(file, subfield_regexp, subfield_positions):
if subfield_positions:
values = []
s_ = subfield_regexp.search(file)
for position in subfield_positions:
values.append(s_.group(position))
values = tuple(values)
else:
values = None
return values
def name_parse_argumentize(subfield_names, subfield_positions, values):
return {subfield_names[i-1]:values[i-1] for i in subfield_positions}
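# A minimal sketch of how the three name_parse_* helpers compose; the pattern,
# field names and filename below are illustrative, not from the original module:
#   r, names, positions = name_parse_prepare(r"chr(\d+)_(\w+)\.txt",
#                                            [("chromosome", 1), ("tag", 2)])
#   values = name_parse("chr1_gwas.txt", r, positions)          # ("1", "gwas")
#   kwargs = name_parse_argumentize(names, positions, values)   # {"chromosome": "1", "tag": "gwas"}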
########################################################################################################################
class PercentReporter(object):
def __init__(self, level, total, increment=10, pattern="%i %% complete"):
self.level = level
self.total = total
self.increment = increment
self.last_reported = 0
self.pattern = pattern
def update(self, i, text=None, force=False):
percent = int(i*100.0/self.total)
if force or percent >= self.last_reported + self.increment:
self.last_reported = percent
if not text:
text = self.pattern
logging.log(self.level, text, percent)
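# A minimal usage sketch for PercentReporter (values illustrative):
#   reporter = PercentReporter(logging.INFO, total=1000)
#   for i in range(1000):
#       reporter.update(i)    # logs "10 % complete", "20 % complete", ...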
ERROR_REGEXP = re.compile('[^0-9a-zA-Z]+')
########################################################################################################################
def load_list(path):
k = pandas.read_table(path, header=None)
return k.iloc[:,0]
def to_dataframe(data, columns, fill_na=None, to_numeric=None):
if len(data):
data = pandas.DataFrame(data, columns=columns)
data = data[columns]
else:
        data = pandas.DataFrame(columns=columns)
if to_numeric:
if type(to_numeric) == list:
for k in to_numeric:
data[k] = | pandas.to_numeric(data[k]) | pandas.to_numeric |
import warnings
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn import metrics  # to check the error and accuracy of the model
from sklearn.metrics import (
    confusion_matrix,
    classification_report,
    r2_score,
    accuracy_score,
)
from sklearn.model_selection import (
    train_test_split,
    KFold,
    StratifiedKFold,
    cross_val_score,
    GridSearchCV,
    cross_validate,
    TimeSeriesSplit,
)
from math import sqrt
import xgboost as xgb
from xgboost import XGBRegressor
import matplotlib
import matplotlib as mpl
from matplotlib import style
from matplotlib import pyplot as plt
import seaborn as sns
import pyforest
import pyfolio as pf
import backtrader as bt
from backtrader.feeds import PandasData
import streamlit as st
from tscv import GapKFold
warnings.filterwarnings("ignore")
warnings.simplefilter(action="ignore", category=FutureWarning)
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
| pd.set_option("display.width", 150) | pandas.set_option |
import pandas as pd
from collections import Counter
from natsort import index_natsorted
import numpy as np
ids = []
text = []
ab_ids = []
ab_text = []
normal_vocab_freq_dist = Counter()
ab_vocab_freq_dist = Counter()
# keywords that are most likely associated with abnormalities
KEYWORDS = ['emphysema', 'cardiomegaly', 'borderline', 'mild', 'chronic', 'minimal', 'copd', 'hernia',
'hyperinflated', 'hemodialysis', 'atelectasis', 'degenerative', 'effusion', 'atherosclerotic',
'aneurysmal', 'granuloma', 'fracture', 'severe', 'concerns', 'fibrosis', 'scarring', 'crowding', 'opacities',
'persistent', 'ectatic', 'hyperinflation', 'moderate', 'opacity', 'calcified', 'effusions', 'edema',
'continued', 'low lung volume', 'pacing lead', 'resection', 'dilated', 'left', 'right', 'bilateral',
'hyperexpanded', 'calcification', 'concerning', 'concern', 'enlargement', 'lines', 'tubes', 'Emphysema',
'Hyperexpanded', 'advanced', 'Advanced', 'tortuosity']
with open('files/normal.txt', mode='r', encoding='utf-8') as f, open('files/abnormal.txt', mode='r', encoding='utf-8') as af:
for line in f:
xml, *label_text = line.split()
ids.append(xml)
normal_vocab_freq_dist.update(label_text)
text.append(' '.join(label_text))
for line in af:
xml, *label_text = line.split()
ab_ids.append(xml)
ab_vocab_freq_dist.update(label_text)
ab_text.append(' '.join(label_text))
def first_filter_normal_label(a_string):
if a_string.startswith(('no acute', 'no evidence', 'no active', 'no radiographic evidence')) and a_string.endswith(
('process.', 'disease.', 'abnormality.', 'abnormalities.', 'findings.', 'finding.', 'identified.',
'infiltrates.', 'infiltrate.')):
return 0
else:
return a_string
def second_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
if a_string.startswith(('normal chest', 'normal exam', 'unremarkable chest', 'unremarkable examination',
'unremarkable radiographs')):
return 0
if a_string.startswith('clear') and a_string.endswith('lungs.'):
return 0
if a_string.startswith(('negative for', 'negative chest')):
return 0
if a_string.startswith('negative') and a_string.endswith('negative.'):
return 0
else:
return a_string
def third_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
if a_string.startswith(('stable appearance', 'stable chest radiograph', 'stable exam', 'stable',
'stable post-procedural', 'stable radiographic')):
if any(w in a_string for w in KEYWORDS):
return a_string
else:
return 0
if a_string.startswith('clear') or a_string.endswith('clear.'):
if any(w in a_string for w in KEYWORDS):
return a_string
else:
return 0
return a_string
def fourth_filter_normal(a_string):
if isinstance(a_string, int):
return a_string
    if 'no acute' in a_string or 'without acute' in a_string:
        if any(w in a_string for w in KEYWORDS):
            return 2
        elif 'stable' in a_string or 'clear' in a_string or 'normal' in a_string:
            return 0
return a_string
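# A sketch of chaining the four filters over the report text collected below,
# assuming they are applied in order so later filters see earlier results
# (0 = strict normal, 2 = borderline/abnormal, a string = still unresolved):
#   labels = (normal_df['label_text']
#             .apply(first_filter_normal_label)
#             .apply(second_filter_normal)
#             .apply(third_filter_normal)
#             .apply(fourth_filter_normal))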
print(normal_vocab_freq_dist.most_common(50))
print(ab_vocab_freq_dist.most_common(50))
# filtering strict normal from borderline/mild abnormal, e.g. stable/chronic conditions but no acute findings
normal = {'xmlId': ids, 'label_text': text}
normal_df = | pd.DataFrame(normal) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import pandas as pd
import pytest
import pytz
from eeweather import (
ISDStation,
get_isd_station_metadata,
get_isd_filenames,
get_gsod_filenames,
get_isd_file_metadata,
fetch_isd_raw_temp_data,
fetch_isd_hourly_temp_data,
fetch_isd_daily_temp_data,
fetch_gsod_raw_temp_data,
fetch_gsod_daily_temp_data,
fetch_tmy3_hourly_temp_data,
fetch_cz2010_hourly_temp_data,
get_isd_hourly_temp_data_cache_key,
get_isd_daily_temp_data_cache_key,
get_gsod_daily_temp_data_cache_key,
get_tmy3_hourly_temp_data_cache_key,
get_cz2010_hourly_temp_data_cache_key,
cached_isd_hourly_temp_data_is_expired,
cached_isd_daily_temp_data_is_expired,
cached_gsod_daily_temp_data_is_expired,
validate_isd_hourly_temp_data_cache,
validate_isd_daily_temp_data_cache,
validate_gsod_daily_temp_data_cache,
validate_tmy3_hourly_temp_data_cache,
validate_cz2010_hourly_temp_data_cache,
serialize_isd_hourly_temp_data,
serialize_isd_daily_temp_data,
serialize_gsod_daily_temp_data,
serialize_tmy3_hourly_temp_data,
serialize_cz2010_hourly_temp_data,
deserialize_isd_hourly_temp_data,
deserialize_isd_daily_temp_data,
deserialize_gsod_daily_temp_data,
deserialize_tmy3_hourly_temp_data,
deserialize_cz2010_hourly_temp_data,
read_isd_hourly_temp_data_from_cache,
read_isd_daily_temp_data_from_cache,
read_gsod_daily_temp_data_from_cache,
read_tmy3_hourly_temp_data_from_cache,
read_cz2010_hourly_temp_data_from_cache,
write_isd_hourly_temp_data_to_cache,
write_isd_daily_temp_data_to_cache,
write_gsod_daily_temp_data_to_cache,
write_tmy3_hourly_temp_data_to_cache,
write_cz2010_hourly_temp_data_to_cache,
destroy_cached_isd_hourly_temp_data,
destroy_cached_isd_daily_temp_data,
destroy_cached_gsod_daily_temp_data,
destroy_cached_tmy3_hourly_temp_data,
destroy_cached_cz2010_hourly_temp_data,
load_isd_hourly_temp_data_cached_proxy,
load_isd_daily_temp_data_cached_proxy,
load_gsod_daily_temp_data_cached_proxy,
load_tmy3_hourly_temp_data_cached_proxy,
load_cz2010_hourly_temp_data_cached_proxy,
load_isd_hourly_temp_data,
load_isd_daily_temp_data,
load_gsod_daily_temp_data,
load_tmy3_hourly_temp_data,
load_cz2010_hourly_temp_data,
load_cached_isd_hourly_temp_data,
load_cached_isd_daily_temp_data,
load_cached_gsod_daily_temp_data,
load_cached_tmy3_hourly_temp_data,
load_cached_cz2010_hourly_temp_data,
)
from eeweather.exceptions import (
UnrecognizedUSAFIDError,
ISDDataNotAvailableError,
GSODDataNotAvailableError,
TMY3DataNotAvailableError,
CZ2010DataNotAvailableError,
NonUTCTimezoneInfoError,
)
from eeweather.testing import (
MockNOAAFTPConnectionProxy,
MockKeyValueStoreProxy,
mock_request_text_tmy3,
mock_request_text_cz2010,
)
@pytest.fixture
def monkeypatch_noaa_ftp(monkeypatch):
monkeypatch.setattr(
"eeweather.connections.noaa_ftp_connection_proxy", MockNOAAFTPConnectionProxy()
)
@pytest.fixture
def monkeypatch_tmy3_request(monkeypatch):
monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_tmy3)
@pytest.fixture
def monkeypatch_cz2010_request(monkeypatch):
monkeypatch.setattr("eeweather.mockable.request_text", mock_request_text_cz2010)
@pytest.fixture
def monkeypatch_key_value_store(monkeypatch):
key_value_store_proxy = MockKeyValueStoreProxy()
monkeypatch.setattr(
"eeweather.connections.key_value_store_proxy", key_value_store_proxy
)
return key_value_store_proxy.get_store()
def test_get_isd_station_metadata():
assert get_isd_station_metadata("722874") == {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_08",
"elevation": "+0054.6",
"icao_code": "KCQT",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
"latitude": "+34.024",
"longitude": "-118.291",
"name": "DOWNTOWN L.A./USC CAMPUS",
"quality": "high",
"recent_wban_id": "93134",
"state": "CA",
"usaf_id": "722874",
"wban_ids": "93134",
}
def test_isd_station_no_load_metadata():
station = ISDStation("722880", load_metadata=False)
assert station.usaf_id == "722880"
assert station.iecc_climate_zone is None
assert station.iecc_moisture_regime is None
assert station.ba_climate_zone is None
assert station.ca_climate_zone is None
assert station.elevation is None
assert station.latitude is None
assert station.longitude is None
assert station.coords is None
assert station.name is None
assert station.quality is None
assert station.wban_ids is None
assert station.recent_wban_id is None
assert station.climate_zones == {}
assert str(station) == "722880"
assert repr(station) == "ISDStation('722880')"
def test_isd_station_no_load_metadata_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
station = ISDStation("FAKE", load_metadata=False)
def test_isd_station_with_load_metadata():
station = ISDStation("722880", load_metadata=True)
assert station.usaf_id == "722880"
assert station.iecc_climate_zone == "3"
assert station.iecc_moisture_regime == "B"
assert station.ba_climate_zone == "Hot-Dry"
assert station.ca_climate_zone == "CA_09"
assert station.elevation == 236.2
assert station.icao_code == "KBUR"
assert station.latitude == 34.201
assert station.longitude == -118.358
assert station.coords == (34.201, -118.358)
assert station.name == "<NAME>"
assert station.quality == "high"
assert station.wban_ids == ["23152", "99999"]
assert station.recent_wban_id == "23152"
assert station.climate_zones == {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_09",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
}
def test_isd_station_json():
station = ISDStation("722880", load_metadata=True)
assert station.json() == {
"elevation": 236.2,
"icao_code": "KBUR",
"latitude": 34.201,
"longitude": -118.358,
"name": "<NAME>",
"quality": "high",
"recent_wban_id": "23152",
"wban_ids": ["23152", "99999"],
"climate_zones": {
"ba_climate_zone": "Hot-Dry",
"ca_climate_zone": "CA_09",
"iecc_climate_zone": "3",
"iecc_moisture_regime": "B",
},
}
def test_isd_station_unrecognized_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError):
station = ISDStation("FAKE", load_metadata=True)
def test_get_isd_filenames_bad_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_isd_filenames("000000", 2007)
assert excinfo.value.value == "000000"
def test_get_isd_filenames_single_year(snapshot):
filenames = get_isd_filenames("722860", 2007)
snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_multiple_year(snapshot):
filenames = get_isd_filenames("722860")
snapshot.assert_match(filenames, "filenames")
def test_get_isd_filenames_future_year():
filenames = get_isd_filenames("722860", 2050)
assert filenames == ["/pub/data/noaa/2050/722860-23119-2050.gz"]
def test_get_isd_filenames_with_host():
filenames = get_isd_filenames("722860", 2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
]
def test_isd_station_get_isd_filenames(snapshot):
station = ISDStation("722860")
filenames = station.get_isd_filenames()
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_year(snapshot):
station = ISDStation("722860")
filenames = station.get_isd_filenames(2007)
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_isd_filenames_with_host():
station = ISDStation("722860")
filenames = station.get_isd_filenames(2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/2017/722860-23119-2017.gz"
]
def test_get_gsod_filenames_bad_usaf_id():
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_gsod_filenames("000000", 2007)
assert excinfo.value.value == "000000"
def test_get_gsod_filenames_single_year(snapshot):
filenames = get_gsod_filenames("722860", 2007)
snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_multiple_year(snapshot):
filenames = get_gsod_filenames("722860")
snapshot.assert_match(filenames, "filenames")
def test_get_gsod_filenames_future_year():
filenames = get_gsod_filenames("722860", 2050)
assert filenames == ["/pub/data/gsod/2050/722860-23119-2050.op.gz"]
def test_get_gsod_filenames_with_host():
filenames = get_gsod_filenames("722860", 2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
]
def test_isd_station_get_gsod_filenames(snapshot):
station = ISDStation("722860")
filenames = station.get_gsod_filenames()
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_year(snapshot):
station = ISDStation("722860")
filenames = station.get_gsod_filenames(2007)
snapshot.assert_match(filenames, "filenames")
def test_isd_station_get_gsod_filenames_with_host():
station = ISDStation("722860")
filenames = station.get_gsod_filenames(2017, with_host=True)
assert filenames == [
"ftp://ftp.ncdc.noaa.gov/pub/data/gsod/2017/722860-23119-2017.op.gz"
]
def test_get_isd_file_metadata():
assert get_isd_file_metadata("722874") == [
{"usaf_id": "722874", "wban_id": "93134", "year": "2006"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2007"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2008"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2009"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2010"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2011"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2012"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2013"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2014"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2015"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2016"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2017"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2018"},
]
with pytest.raises(UnrecognizedUSAFIDError) as excinfo:
get_isd_file_metadata("000000")
assert excinfo.value.value == "000000"
def test_isd_station_get_isd_file_metadata():
station = ISDStation("722874")
assert station.get_isd_file_metadata() == [
{"usaf_id": "722874", "wban_id": "93134", "year": "2006"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2007"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2008"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2009"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2010"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2011"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2012"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2013"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2014"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2015"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2016"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2017"},
{"usaf_id": "722874", "wban_id": "93134", "year": "2018"},
]
# fetch raw
def test_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_raw_temp_data("722874", 2007)
assert round(data.sum()) == 185945
assert data.shape == (11094,)
def test_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
data = fetch_gsod_raw_temp_data("722874", 2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
# station fetch raw
def test_isd_station_fetch_isd_raw_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_raw_temp_data(2007)
assert round(data.sum()) == 185945
assert data.shape == (11094,)
def test_isd_station_fetch_gsod_raw_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_gsod_raw_temp_data(2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
# fetch raw invalid station
def test_fetch_isd_raw_temp_data_invalid_station():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_raw_temp_data("INVALID", 2007)
def test_fetch_gsod_raw_temp_data_invalid_station():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_gsod_raw_temp_data("INVALID", 2007)
# fetch raw invalid year
def test_fetch_isd_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
with pytest.raises(ISDDataNotAvailableError):
fetch_isd_raw_temp_data("722874", 1800)
def test_fetch_gsod_raw_temp_data_invalid_year(monkeypatch_noaa_ftp):
with pytest.raises(GSODDataNotAvailableError):
fetch_gsod_raw_temp_data("722874", 1800)
# fetch file full of nans
def test_isd_station_fetch_isd_raw_temp_data_all_nan(monkeypatch_noaa_ftp):
station = ISDStation("994035")
data = station.fetch_isd_raw_temp_data(2013)
assert round(data.sum()) == 0
assert data.shape == (8611,)
# fetch
def test_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_hourly_temp_data("722874", 2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
def test_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
data = fetch_isd_daily_temp_data("722874", 2007)
assert data.sum() == 6510.002260821784
assert data.shape == (365,)
def test_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
data = fetch_gsod_daily_temp_data("722874", 2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
def test_fetch_tmy3_hourly_temp_data(monkeypatch_tmy3_request):
data = fetch_tmy3_hourly_temp_data("722880")
assert data.sum() == 156194.3
assert data.shape == (8760,)
def test_fetch_cz2010_hourly_temp_data(monkeypatch_cz2010_request):
data = fetch_cz2010_hourly_temp_data("722880")
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
# station fetch
def test_isd_station_fetch_isd_hourly_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_hourly_temp_data(2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
def test_isd_station_fetch_isd_daily_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_isd_daily_temp_data(2007)
assert data.sum() == 6510.002260821784
assert data.shape == (365,)
def test_isd_station_fetch_gsod_daily_temp_data(monkeypatch_noaa_ftp):
station = ISDStation("722874")
data = station.fetch_gsod_daily_temp_data(2007)
assert data.sum() == 6509.5
assert data.shape == (365,)
def test_tmy3_station_hourly_temp_data(monkeypatch_tmy3_request):
station = ISDStation("722880")
data = station.fetch_tmy3_hourly_temp_data()
assert data.sum() == 156194.3
assert data.shape == (8760,)
def test_cz2010_station_hourly_temp_data(monkeypatch_cz2010_request):
station = ISDStation("722880")
data = station.fetch_cz2010_hourly_temp_data()
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
# fetch invalid station
def test_fetch_isd_hourly_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_hourly_temp_data("INVALID", 2007)
def test_fetch_isd_daily_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_isd_daily_temp_data("INVALID", 2007)
def test_fetch_gsod_daily_temp_data_invalid():
with pytest.raises(UnrecognizedUSAFIDError):
fetch_gsod_daily_temp_data("INVALID", 2007)
def test_fetch_tmy3_hourly_temp_data_invalid():
with pytest.raises(TMY3DataNotAvailableError):
fetch_tmy3_hourly_temp_data("INVALID")
def test_fetch_cz2010_hourly_temp_data_invalid():
with pytest.raises(CZ2010DataNotAvailableError):
fetch_cz2010_hourly_temp_data("INVALID")
def test_fetch_tmy3_hourly_temp_data_not_in_tmy3_list(monkeypatch_noaa_ftp):
data = fetch_isd_hourly_temp_data("722874", 2007)
assert data.sum() == 156160.0355
assert data.shape == (8760,)
with pytest.raises(TMY3DataNotAvailableError):
fetch_tmy3_hourly_temp_data("722874")
def test_fetch_cz2010_hourly_temp_data_not_in_cz2010_list(monkeypatch_cz2010_request):
data = fetch_cz2010_hourly_temp_data("722880")
assert data.sum() == 153430.90000000002
assert data.shape == (8760,)
with pytest.raises(CZ2010DataNotAvailableError):
fetch_cz2010_hourly_temp_data("725340")
# get cache key
def test_get_isd_hourly_temp_data_cache_key():
assert (
get_isd_hourly_temp_data_cache_key("722874", 2007) == "isd-hourly-722874-2007"
)
def test_get_isd_daily_temp_data_cache_key():
assert get_isd_daily_temp_data_cache_key("722874", 2007) == "isd-daily-722874-2007"
def test_get_gsod_daily_temp_data_cache_key():
assert (
get_gsod_daily_temp_data_cache_key("722874", 2007) == "gsod-daily-722874-2007"
)
def test_get_tmy3_hourly_temp_data_cache_key():
assert get_tmy3_hourly_temp_data_cache_key("722880") == "tmy3-hourly-722880"
def test_get_cz2010_hourly_temp_data_cache_key():
assert get_cz2010_hourly_temp_data_cache_key("722880") == "cz2010-hourly-722880"
# station get cache key
def test_isd_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_isd_hourly_temp_data_cache_key(2007) == "isd-hourly-722874-2007"
def test_isd_station_get_isd_daily_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_isd_daily_temp_data_cache_key(2007) == "isd-daily-722874-2007"
def test_isd_station_get_gsod_daily_temp_data_cache_key():
station = ISDStation("722874")
assert station.get_gsod_daily_temp_data_cache_key(2007) == "gsod-daily-722874-2007"
def test_tmy3_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722880")
assert station.get_tmy3_hourly_temp_data_cache_key() == "tmy3-hourly-722880"
def test_cz2010_station_get_isd_hourly_temp_data_cache_key():
station = ISDStation("722880")
assert station.get_cz2010_hourly_temp_data_cache_key() == "cz2010-hourly-722880"
# cache expired empty
def test_cached_isd_hourly_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is True
def test_cached_gsod_daily_temp_data_is_expired_empty(monkeypatch_key_value_store):
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is True
# station cache expired empty
def test_isd_station_cached_isd_hourly_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_isd_hourly_temp_data_is_expired(2007) is True
def test_isd_station_cached_isd_daily_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_isd_daily_temp_data_is_expired(2007) is True
def test_isd_station_cached_gsod_daily_temp_data_is_expired_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.cached_gsod_daily_temp_data_is_expired(2007) is True
# cache expired false
def test_cached_isd_hourly_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is False
def test_cached_isd_daily_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is False
def test_cached_gsod_daily_temp_data_is_expired_false(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is False
# cache expired true
def test_cached_isd_hourly_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_hourly_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_isd_hourly_temp_data_is_expired("722874", 2007) is True
def test_cached_isd_daily_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_isd_daily_temp_data_is_expired("722874", 2007) is True
def test_cached_gsod_daily_temp_data_is_expired_true(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_gsod_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert cached_gsod_daily_temp_data_is_expired("722874", 2007) is True
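# The three tests above simulate staleness by rewriting the cached row's
# `updated` timestamp (back-dated to 2007) directly through the key-value
# store's table, rather than waiting for a real expiry window; the
# *_is_expired helpers are then expected to report True.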
# validate cache empty
def test_validate_isd_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_isd_hourly_temp_data_cache("722874", 2007) is False
def test_validate_isd_daily_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_isd_daily_temp_data_cache("722874", 2007) is False
def test_validate_gsod_daily_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_gsod_daily_temp_data_cache("722874", 2007) is False
def test_validate_tmy3_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_tmy3_hourly_temp_data_cache("722880") is False
def test_validate_cz2010_hourly_temp_data_cache_empty(monkeypatch_key_value_store):
assert validate_cz2010_hourly_temp_data_cache("722880") is False
# station validate cache empty
def test_isd_station_validate_isd_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_isd_hourly_temp_data_cache(2007) is False
def test_isd_station_validate_isd_daily_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_isd_daily_temp_data_cache(2007) is False
def test_isd_station_validate_gsod_daily_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722874")
assert station.validate_gsod_daily_temp_data_cache(2007) is False
def test_isd_station_validate_tmy3_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722880")
assert station.validate_tmy3_hourly_temp_data_cache() is False
def test_isd_station_validate_cz2010_hourly_temp_data_cache_empty(
monkeypatch_key_value_store
):
station = ISDStation("722880")
assert station.validate_cz2010_hourly_temp_data_cache() is False
# error on non-existent when relying on cache
def test_raise_on_missing_isd_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(ISDDataNotAvailableError):
load_isd_hourly_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_isd_daily_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(ISDDataNotAvailableError):
load_isd_daily_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_gsod_daily_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(GSODDataNotAvailableError):
load_gsod_daily_temp_data_cached_proxy("722874", 1907, fetch_from_web=False)
def test_raise_on_missing_tmy3_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(TMY3DataNotAvailableError):
load_tmy3_hourly_temp_data_cached_proxy("722874", fetch_from_web=False)
def test_raise_on_missing_cz2010_hourly_temp_data_cache_data_no_web_fetch(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
with pytest.raises(CZ2010DataNotAvailableError):
load_cz2010_hourly_temp_data_cached_proxy("722874", fetch_from_web=False)
# validate updated recently
def test_validate_isd_hourly_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert validate_isd_hourly_temp_data_cache("722874", 2007) is True
def test_validate_isd_daily_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert validate_isd_daily_temp_data_cache("722874", 2007) is True
def test_validate_gsod_daily_temp_data_cache_updated_recently(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert validate_gsod_daily_temp_data_cache("722874", 2007) is True
def test_validate_tmy3_hourly_temp_data_cache_updated_recently(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
load_tmy3_hourly_temp_data_cached_proxy("722880")
assert validate_tmy3_hourly_temp_data_cache("722880") is True
def test_validate_cz2010_hourly_temp_data_cache_updated_recently(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
load_cz2010_hourly_temp_data_cached_proxy("722880")
assert validate_cz2010_hourly_temp_data_cache("722880") is True
# validate expired
def test_validate_isd_hourly_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_hourly_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_hourly_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_isd_hourly_temp_data_cache("722874", 2007) is False
def test_validate_isd_daily_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_isd_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_isd_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_isd_daily_temp_data_cache("722874", 2007) is False
def test_validate_gsod_daily_temp_data_cache_expired(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
load_gsod_daily_temp_data_cached_proxy("722874", 2007)
# manually expire key value item
key = get_gsod_daily_temp_data_cache_key("722874", 2007)
store = monkeypatch_key_value_store
store.items.update().where(store.items.c.key == key).values(
updated=pytz.UTC.localize(datetime(2007, 3, 3))
).execute()
assert validate_gsod_daily_temp_data_cache("722874", 2007) is False
# serialize
def test_serialize_isd_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_isd_hourly_temp_data(ts) == [["2017010100", 1]]
def test_serialize_isd_daily_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_isd_daily_temp_data(ts) == [["20170101", 1]]
def test_serialize_gsod_daily_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_gsod_daily_temp_data(ts) == [["20170101", 1]]
def test_serialize_tmy3_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_tmy3_hourly_temp_data(ts) == [["2017010100", 1]]
def test_serialize_cz2010_hourly_temp_data():
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert serialize_cz2010_hourly_temp_data(ts) == [["2017010100", 1]]
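# The serialized payload pairs a UTC timestamp string with a temperature value:
# "YYYYMMDDHH" for hourly series and "YYYYMMDD" for daily series, the same shape
# consumed by the deserialize tests below.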
# station serialize
def test_isd_station_serialize_isd_hourly_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_isd_hourly_temp_data(ts) == [["2017010100", 1]]
def test_isd_station_serialize_isd_daily_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_isd_daily_temp_data(ts) == [["20170101", 1]]
def test_isd_station_serialize_gsod_daily_temp_data():
station = ISDStation("722874")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_gsod_daily_temp_data(ts) == [["20170101", 1]]
def test_isd_station_serialize_tmy3_hourly_temp_data():
station = ISDStation("722880")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_tmy3_hourly_temp_data(ts) == [["2017010100", 1]]
def test_isd_station_serialize_cz2010_hourly_temp_data():
station = ISDStation("722880")
ts = pd.Series([1], index=[pytz.UTC.localize(datetime(2017, 1, 1))])
assert station.serialize_cz2010_hourly_temp_data(ts) == [["2017010100", 1]]
# deserialize
def test_deserialize_isd_hourly_temp_data():
ts = deserialize_isd_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_deserialize_isd_daily_temp_data():
ts = deserialize_isd_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_deserialize_gsod_daily_temp_data():
ts = deserialize_gsod_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_deserialize_tmy3_hourly_temp_data():
ts = deserialize_tmy3_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_deserialize_cz2010_hourly_temp_data():
ts = deserialize_cz2010_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
# station deserialize
def test_isd_station_deserialize_isd_hourly_temp_data():
station = ISDStation("722874")
ts = station.deserialize_isd_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_isd_station_deserialize_isd_daily_temp_data():
station = ISDStation("722874")
ts = station.deserialize_isd_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_isd_station_deserialize_gsod_daily_temp_data():
station = ISDStation("722874")
ts = station.deserialize_gsod_daily_temp_data([["20170101", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "D"
def test_isd_station_deserialize_tmy3_hourly_temp_data():
station = ISDStation("722880")
ts = station.deserialize_tmy3_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
def test_isd_station_deserialize_cz2010_hourly_temp_data():
station = ISDStation("722880")
ts = station.deserialize_cz2010_hourly_temp_data([["2017010100", 1]])
assert ts.sum() == 1
assert ts.index.freq.name == "H"
# write read destroy
def test_write_read_destroy_isd_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_isd_hourly_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_isd_hourly_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_isd_hourly_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_isd_hourly_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_isd_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_isd_daily_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_isd_daily_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_isd_daily_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_isd_daily_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_gsod_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_gsod_daily_temp_data_cache_key("123456", 1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_gsod_daily_temp_data_to_cache("123456", 1990, ts1)
assert store.key_exists(key) is True
ts2 = read_gsod_daily_temp_data_from_cache("123456", 1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_gsod_daily_temp_data("123456", 1990)
assert store.key_exists(key) is False
def test_write_read_destroy_tmy3_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_tmy3_hourly_temp_data_cache_key("123456")
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_tmy3_hourly_temp_data_to_cache("123456", ts1)
assert store.key_exists(key) is True
ts2 = read_tmy3_hourly_temp_data_from_cache("123456")
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_tmy3_hourly_temp_data("123456")
assert store.key_exists(key) is False
def test_write_read_destroy_cz2010_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
store = monkeypatch_key_value_store
key = get_cz2010_hourly_temp_data_cache_key("123456")
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
write_cz2010_hourly_temp_data_to_cache("123456", ts1)
assert store.key_exists(key) is True
ts2 = read_cz2010_hourly_temp_data_from_cache("123456")
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
destroy_cached_cz2010_hourly_temp_data("123456")
assert store.key_exists(key) is False
# station write read destroy
def test_isd_station_write_read_destroy_isd_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_isd_hourly_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_isd_hourly_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_isd_hourly_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_isd_hourly_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_isd_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_isd_daily_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_isd_daily_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_isd_daily_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_isd_daily_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_gsod_daily_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722874")
store = monkeypatch_key_value_store
key = station.get_gsod_daily_temp_data_cache_key(1990)
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_gsod_daily_temp_data_to_cache(1990, ts1)
assert store.key_exists(key) is True
ts2 = station.read_gsod_daily_temp_data_from_cache(1990)
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_gsod_daily_temp_data(1990)
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_tmy3_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722880")
store = monkeypatch_key_value_store
key = station.get_tmy3_hourly_temp_data_cache_key()
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_tmy3_hourly_temp_data_to_cache(ts1)
assert store.key_exists(key) is True
ts2 = station.read_tmy3_hourly_temp_data_from_cache()
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_tmy3_hourly_temp_data()
assert store.key_exists(key) is False
def test_isd_station_write_read_destroy_cz2010_hourly_temp_data_to_from_cache(
monkeypatch_key_value_store
):
station = ISDStation("722880")
store = monkeypatch_key_value_store
key = station.get_cz2010_hourly_temp_data_cache_key()
assert store.key_exists(key) is False
ts1 = pd.Series([1], index=[pytz.UTC.localize(datetime(1990, 1, 1))])
station.write_cz2010_hourly_temp_data_to_cache(ts1)
assert store.key_exists(key) is True
ts2 = station.read_cz2010_hourly_temp_data_from_cache()
assert store.key_exists(key) is True
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
station.destroy_cached_cz2010_hourly_temp_data()
assert store.key_exists(key) is False
# load cached proxy
def test_load_isd_hourly_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_isd_hourly_temp_data_cached_proxy("722874", 2007)
ts2 = load_isd_hourly_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_isd_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_isd_daily_temp_data_cached_proxy("722874", 2007)
ts2 = load_isd_daily_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_gsod_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_gsod_daily_temp_data_cached_proxy("722874", 2007)
ts2 = load_gsod_daily_temp_data_cached_proxy("722874", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_tmy3_hourly_temp_data_cached_proxy(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_tmy3_hourly_temp_data_cached_proxy("722880", 2007)
ts2 = load_tmy3_hourly_temp_data_cached_proxy("722880", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_load_cz2010_hourly_temp_data_cached_proxy(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = load_cz2010_hourly_temp_data_cached_proxy("722880", 2007)
ts2 = load_cz2010_hourly_temp_data_cached_proxy("722880", 2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
# station load cached proxy
def test_isd_station_load_isd_hourly_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_isd_hourly_temp_data_cached_proxy(2007)
ts2 = station.load_isd_hourly_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_isd_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_isd_daily_temp_data_cached_proxy(2007)
ts2 = station.load_isd_daily_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_gsod_daily_temp_data_cached_proxy(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
station = ISDStation("722874")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_gsod_daily_temp_data_cached_proxy(2007)
ts2 = station.load_gsod_daily_temp_data_cached_proxy(2007)
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_tmy3_hourly_temp_data_cached_proxy(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
station = ISDStation("722880")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_tmy3_hourly_temp_data_cached_proxy()
ts2 = station.load_tmy3_hourly_temp_data_cached_proxy()
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
def test_isd_station_load_cz2010_hourly_temp_data_cached_proxy(
monkeypatch_cz2010_request, monkeypatch_key_value_store
):
station = ISDStation("722880")
# doesn't yet guarantee that all code paths are taken,
# except that coverage picks it up either here or elsewhere
ts1 = station.load_cz2010_hourly_temp_data_cached_proxy()
ts2 = station.load_cz2010_hourly_temp_data_cached_proxy()
assert int(ts1.sum()) == int(ts2.sum())
assert ts1.shape == ts2.shape
# load data between dates
def test_load_isd_hourly_temp_data(monkeypatch_noaa_ftp, monkeypatch_key_value_store):
start = datetime(2006, 1, 3, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, tzinfo=pytz.UTC)
ts, warnings = load_isd_hourly_temp_data("722874", start, end)
assert ts.index[0] == start
assert pd.isnull(ts[0])
assert ts.index[-1] == end
assert pd.notnull(ts[-1])
def test_load_isd_hourly_temp_data_non_normalized_dates(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
start = datetime(2006, 1, 3, 11, 12, 13, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, 12, 13, 14, tzinfo=pytz.UTC)
ts, warnings = load_isd_hourly_temp_data("722874", start, end)
assert ts.index[0] == datetime(2006, 1, 3, 12, tzinfo=pytz.UTC)
assert pd.isnull(ts[0])
assert ts.index[-1] == datetime(2007, 4, 3, 12, tzinfo=pytz.UTC)
assert pd.notnull(ts[-1])
def test_load_isd_daily_temp_data(monkeypatch_noaa_ftp, monkeypatch_key_value_store):
start = datetime(2006, 1, 3, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, tzinfo=pytz.UTC)
ts = load_isd_daily_temp_data("722874", start, end)
assert ts.index[0] == start
assert pd.isnull(ts[0])
assert ts.index[-1] == end
assert pd.notnull(ts[-1])
def test_load_isd_daily_temp_data_non_normalized_dates(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
start = datetime(2006, 1, 3, 11, 12, 13, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, 12, 13, 14, tzinfo=pytz.UTC)
ts = load_isd_daily_temp_data("722874", start, end)
assert ts.index[0] == datetime(2006, 1, 4, tzinfo=pytz.UTC)
assert pd.isnull(ts[0])
assert ts.index[-1] == datetime(2007, 4, 3, tzinfo=pytz.UTC)
assert pd.notnull(ts[-1])
def test_load_gsod_daily_temp_data(monkeypatch_noaa_ftp, monkeypatch_key_value_store):
start = datetime(2006, 1, 3, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, tzinfo=pytz.UTC)
ts = load_gsod_daily_temp_data("722874", start, end)
assert ts.index[0] == start
assert pd.isnull(ts[0])
assert ts.index[-1] == end
assert pd.notnull(ts[-1])
def test_load_gsod_daily_temp_data_non_normalized_dates(
monkeypatch_noaa_ftp, monkeypatch_key_value_store
):
start = datetime(2006, 1, 3, 11, 12, 13, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, 12, 13, 14, tzinfo=pytz.UTC)
ts = load_gsod_daily_temp_data("722874", start, end)
assert ts.index[0] == datetime(2006, 1, 4, tzinfo=pytz.UTC)
assert pd.isnull(ts[0])
assert ts.index[-1] == datetime(2007, 4, 3, tzinfo=pytz.UTC)
assert pd.notnull(ts[-1])
def test_load_tmy3_hourly_temp_data(
monkeypatch_tmy3_request, monkeypatch_key_value_store
):
start = datetime(2006, 1, 3, tzinfo=pytz.UTC)
end = datetime(2007, 4, 3, tzinfo=pytz.UTC)
ts = load_tmy3_hourly_temp_data("722880", start, end)
assert ts.index[0] == start
assert pd.notnull(ts[0])
assert ts.index[-1] == end
    assert pd.notnull(ts[-1])
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_type_equal(self):
checks.assert_type_equal(0, 1)
checks.assert_type_equal(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
            checks.assert_type_equal(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), np.float)
checks.assert_dtype(pd.Series([1, 2, 3]), np.int)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), np.int)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.int)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.float)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.float)
def test_assert_dtype_equal(self):
checks.assert_dtype_equal([1], [1, 1, 1])
checks.assert_dtype_equal(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_len_equal(self):
checks.assert_len_equal([[1]], [[2]])
checks.assert_len_equal([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_len_equal([[1]], [[2], [3]])
def test_assert_shape_equal(self):
checks.assert_shape_equal(0, 1)
checks.assert_shape_equal([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_shape_equal([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_shape_equal(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_index_equal(self):
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([2, 3, 4]))
def test_assert_meta_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_meta_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_meta_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]], columns=columns), pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.Series([1, 2]), pd.DataFrame([1, 2]))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([1, 2]), pd.DataFrame([1, 2, 3]))
with pytest.raises(Exception) as e_info:
            checks.assert_meta_equal(pd.DataFrame([1, 2, 3]), pd.DataFrame([1, 2, 3], index=index))
import tempfile
import pytest
import pandas as pd
from fuzzyfinder.database import SearchDatabase
def test_build_and_search():
db_filename = tempfile.NamedTemporaryFile().name
db = SearchDatabase(db_filename)
rec1 = {"unique_id": 1, "first_name": "robin", "surname": "linacre"}
rec2 = {"unique_id": 2, "first_name": "robyn", "surname": "linaker"}
rec3 = {"unique_id": 3, "first_name": "robin", "surname": "linacre"}
rec3 = {"unique_id": 4, "first_name": "david", "surname": "smith"}
dicts = [rec1, rec2, rec3]
db.write_list_dicts_parallel(dicts, unique_id_col="unique_id")
db.build_or_replace_stats_tables()
search_rec = {"unique_id": 4, "first_name": "robin", "surname": None}
assert 1 in db.find_potental_matches(search_rec).keys()
# With record caching, we want to make sure that if the search rec is changed but the unique id
# is for some reason left the same, we get different search results
search_rec = {"unique_id": 4, "first_name": "david", "surname": None}
assert 4 in db.find_potental_matches(search_rec).keys()
# See https://stackoverflow.com/questions/11942364/typeerror-integer-is-not-json-serializable-when-serializing-json-in-python
# Problem is that the new pandas nullable integer cannot be serialised to json
def test_json_problem():
db_filename = tempfile.NamedTemporaryFile().name
db = SearchDatabase(db_filename)
rec1 = {"unique_id": 1, "first_name": "robin", "int_problem": 1}
rec2 = {"unique_id": 2, "first_name": "robyn", "int_problem": 2}
rec3 = {"unique_id": 3, "first_name": "robin", "int_problem": 3}
rec3 = {"unique_id": 4, "first_name": "david", "int_problem": None}
import pandas as pd
dicts = [rec1, rec2, rec3]
df = pd.DataFrame(dicts)
df["int_problem"] = df["int_problem"].astype( | pd.Int64Dtype() | pandas.Int64Dtype |
__all__ = [
"add_net_meta", "convert_column", "and_filter", "get_outlier_bounds", "avg_over_net", "normalize",
"add_topo", "add_median_lh", "add_split_label", "remove_outliers", "calc_paired_diff", "calc_percentage_change",
"calc_icc", "normalize_series", "concat_dfs", "long_column_to_wide",
]
import itertools
import pandas as pd
import numpy as np
from matplotlib.cbook import boxplot_stats
import pandas_flavor as pf
from neuro_helper.statistics import percent_change, icc
@pf.register_dataframe_method
def add_net_meta(df, labels):
meta = pd.Series(index=df.index, name="net_meta")
for label, nets in labels.items():
meta.loc[df.network.isin(nets)] = label
df["net_meta"] = meta
return df
@pf.register_dataframe_method
def convert_column(df, **col_dict):
new_df = df.copy()
for col_name, func in col_dict.items():
new_df[col_name] = func(new_df[col_name])
return new_df
@pf.register_dataframe_method
def and_filter(df, drop_single=True, **kwargs):
filt = True
keys = []
for key, value in kwargs.items():
negate = False
if key.startswith("NOT"):
negate = True
key = key.replace("NOT", "")
keys.append(key)
if type(value) in [list, tuple, np.ndarray]:
this_filt = df[key].isin(value)
else:
this_filt = df[key] == value
filt &= ~this_filt if negate else this_filt
new_df = df[filt]
if drop_single:
return new_df.drop([c for c in keys if len(new_df[c].unique()) <= 1], 1)
else:
return new_df
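# Illustrative usage sketch (added note, not part of the original module; column names are
# hypothetical): keyword filters are AND-ed together, and a "NOT" prefix negates a condition.
#   df.and_filter(task="rest", NOTnetwork=["Limbic", "Subcortical"])
# keeps rows where task == "rest" and network is not Limbic/Subcortical; filter columns left
# with a single unique value are dropped from the result unless drop_single=False.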
@pf.register_dataframe_method
def get_outlier_bounds(df, of):
if isinstance(of, str):
of = [of, ]
out = []
for col in of:
stat = boxplot_stats(df[col])[0]
out.append((stat["whislo"], stat["whishi"]))
return out[0] if len(out) == 1 else out
@pf.register_dataframe_method
def avg_over_net(df):
return df.groupby(list(df.columns.drop(["region", "metric"]))).mean().reset_index()
@pf.register_dataframe_method
def normalize(x, columns, new_min=0, new_max=1):
if isinstance(columns, str):
columns = [columns, ]
df = x.copy()
for on in columns:
df[on] = normalize_series(df[on], new_min, new_max)
return df
@pf.register_dataframe_method
def add_topo(df, *args):
new_df = df
has_net = "network" in df.columns
for topo in args:
topo()
topo_data = topo.data
if has_net:
new_df = pd.merge(new_df, topo_data, on=["region", "network"])
else:
new_df = pd.merge(new_df, topo_data.drop("network", 1), on=["region"])
return new_df
@pf.register_dataframe_method
def add_median_lh(x, calc_med_col, values=("L", "H")):
med = x[calc_med_col].median()
return add_split_label(x, calc_med_col, calc_med_col, (values, med))
@pf.register_dataframe_method
def add_split_label(df, on, based, criteria):
x = df.copy()
if callable(criteria):
labels, borders = criteria(x[based])
else:
labels, borders = criteria
if np.isscalar(borders):
borders = [borders, ]
if len(labels) != len(borders) + 1:
raise ValueError("labels should be one more than borders")
new_col_name = f"{on}_split"
on_splitted = pd.Series(index=x.index, name=new_col_name, data=pd.NA)
borders.append(borders[-1])
for index, (label, border) in enumerate(zip(labels, borders)):
if index == 0:
filt = x[based] < border
elif index == len(labels) - 1:
filt = x[based] >= border
else:
filt = (x[based] < border) & (x[based] >= borders[index - 1])
on_splitted.loc[filt] = label
if on_splitted.isna().any():
raise ValueError(f"criteria does not cover the whole {on} bound")
x[new_col_name] = on_splitted
return x
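# Illustrative usage sketch (added note, not part of the original module; "coef" and the
# borders are hypothetical): split a metric column into three bands at two cut points.
#   labeled = df.add_split_label(on="coef", based="coef",
#                                criteria=(("low", "mid", "high"), [0.3, 0.7]))
#   labeled["coef_split"].value_counts()
# criteria may also be a callable returning (labels, borders), as handled above.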
@pf.register_dataframe_method
def remove_outliers(x, of):
stat = boxplot_stats(x[of])[0]
low, high = stat["whislo"], stat["whishi"]
return x.loc[(x[of] > low) & (x[of] < high)]
@pf.register_dataframe_method
def calc_paired_diff(x, diff_func=lambda left, right: abs(left - right), repeat=True):
"""
    Calculates the pairwise (2-by-2) difference on a single column.
    :param x: a dataframe with only two columns.
    The first column is the label for the second column; the second one is the metric.
    :param diff_func: the difference function. Default is the absolute difference (L1 norm).
    :param repeat: if True, return all combinations with repetition (product); otherwise only unique combinations
:return: a dataframe with 3 columns. Left items, Right items and the difference between left and right
"""
    diff = pd.DataFrame(columns=("left", "right", "difference"))
import pandas as pd
import numpy as np
import os
import requests
import json
import datetime
import time
MIN_FINAL_RATING = 1500 # top submission in a match must have reached this score
num_api_calls_today = 0
all_files = []
for root, dirs, files in os.walk('../input/', topdown=False):
all_files.extend(files)
seen_episodes = [int(f.split('.')[0]) for f in all_files
if '.' in f and f.split('.')[0].isdigit() and f.split('.')[1] == 'json']
print('{} games in existing library'.format(len(seen_episodes)))
NUM_TEAMS = 1
EPISODES = 600
BUFFER = 1
base_url = "https://www.kaggle.com/requests/EpisodeService/"
get_url = base_url + "GetEpisodeReplay"
list_url = base_url + "ListEpisodes"
# initial team list
r = requests.post(list_url, json = {"teamId": 5586412}) # arbitrary ID, change to leading ID during challenge
rj = r.json()
teams_df = pd.DataFrame(rj['result']['teams'])
teams_df.sort_values('publicLeaderboardRank', inplace = True)
teams_df.head(6)
def getTeamEpisodes(team_id):
# request
r = requests.post(list_url, json = {"teamId": int(team_id)})
rj = r.json()
# update teams list
global teams_df
    teams_df_new = pd.DataFrame(rj['result']['teams'])
from os.path import dirname, join as pjoin
import scipy.io as sio
import numpy as np
import pandas as pd
import sys
import os
import argparse
import shutil
# Globally accessible:
csv_folderpath = os.path.join(sys.path[0], 'csvIndexes')
class ADEIndex():
def __init__(self):
self.image_index = None
self.object_name_list = None
self.object_image_matrix = None
self.CSVsExist = False
if os.path.exists(csv_folderpath)\
and os.path.exists(os.path.join(csv_folderpath, 'image_index.csv'))\
and os.path.exists(os.path.join(csv_folderpath, 'object_name_list.csv'))\
and os.path.exists(os.path.join(csv_folderpath,'object_image_matrix.csv')):
print("Now loading data from CSV files")
self.image_index = pd.read_csv(os.path.join(csv_folderpath, 'image_index.csv'))
self.object_name_list = pd.read_csv(os.path.join(csv_folderpath, 'object_name_list.csv'))
self.object_image_matrix = pd.read_csv(os.path.join(csv_folderpath, 'object_image_matrix.csv'))
self.CSVsExist = True
else:
print("No CSVs found - will save CSVs after loading MATLAB data")
# This script should be run from within the project's James_TompGAN/data/ folder
# data_dir = pjoin('ADE20K_2016_07_26')
# mat_fname = pjoin(data_dir, 'index_ade20k.mat')
mat_fname = os.path.join(sys.path[0], 'ADE20K_2016_07_26', 'index_ade20k.mat')
mat_contents = sio.loadmat(mat_fname)
matindex = mat_contents['index'][0,0]
# print("Index fields are ", matindex.dtype)
# The index does NOT have a consistent row or column structure, I assume
# the reason is just that it's composed of a bunch of different MATLAB arrays
# The columns are transposed occasionally because otherwise they don't fit
# together - they're imported from MATLAB in a bunch of inconsistent dimensions
num_examples = matindex[matindex.dtype.names[1]].size
print("There are ", num_examples, " images in the dataset")
# print("Here's a list of the attributes in the MATLAB data:")
# for name_i in matindex.dtype.names:
# print("Attribute: ", name_i)
# print("Dimensions of ", name_i, ": ", matindex[name_i].shape)
# --------
# putting image attributes in a DataFrame
filename_col_nested = pd.DataFrame(matindex['filename'].T, columns=['filename'])
filename_col = pd.DataFrame(columns=['filename'])
for index, row in filename_col_nested.iterrows():
filename_col.loc[index] = filename_col_nested['filename'][index][0]
folder_col_nested = pd.DataFrame(matindex['folder'].T, columns=['folder'])
folder_col = pd.DataFrame(columns=['folder'])
for index, row in folder_col_nested.iterrows():
folder_col.loc[index] = folder_col_nested['folder'][index][0]
# I don't know what this column is for (it's not documented on the dataset site)
typeset_col = pd.DataFrame(matindex['typeset'], columns=['typeset'])
# scene type of each image
scene_col = pd.DataFrame(matindex['scene'].T, columns=['scene'])
# putting the columns together
int_indexed_image_index = pd.concat([filename_col, folder_col, typeset_col, scene_col], axis=1)
self.image_index = int_indexed_image_index.set_index('filename')
# Need filename col to be the index AND a query-able column
# (because conversion to csv makes the index just an int)
# self.image_index = pd.concat([self.image_index, filename_col], axis=1)
# print(image_index.index)
# print(image_index)
# print(image_index['ADE_train_00011093.jpg'])
# image_index.to_csv("csvIndexes/image_index.csv")
# print(image_index['ADE_train_00011093.jpg'])
# -------
# Putting object attributes in a DataFrame
object_name_list_nested = | pd.DataFrame(matindex['objectnames'].T, columns=['objectnames']) | pandas.DataFrame |
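# Illustrative sketch of the cell-unwrapping pattern used above: scipy.io.loadmat
# returns nested object arrays, and each cell needs [0] to yield the plain string.
# The toy array below is an assumption, not data from index_ade20k.mat.
import numpy as np
import pandas as pd

_cells = np.empty((1, 2), dtype=object)
_cells[0, 0] = np.array(['ADE_train_00000001.jpg'])
_cells[0, 1] = np.array(['ADE_train_00000002.jpg'])
_nested = pd.DataFrame(_cells.T, columns=['filename'])
_flat = pd.DataFrame({'filename': [cell[0] for cell in _nested['filename']]})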
# Overcommented for explanatory reasons
import re
import os
# Type annotations
from typing import IO, Text
# Reading pdf
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
# Pandas and numpy for visualization
import numpy as np
import pandas as pd
def read(pdf: IO) -> Text:
"""Acquire a pdf and return a text file"""
    pdfname = pdf[:-4]
# Setting the conversion trio
pagenums = set()
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
# Opening the pdf
infile = open(pdf, 'rb')
    # Reading loop: get the page and use the interpreter
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
# Get the values and turn them to byte
content = output.getvalue()
output.close()
# Specify encoding
tobyte = content.encode(encoding="latin-1",\
errors="backslashreplace")
    # Copying to txt, finally!
with open(pdfname + '.txt', 'wb') as txt_file:
txt_file.write(tobyte)
def cleantxt(filetxt: Text) -> Text:
"""
Fix all the latin-1 badly imported chars.
Remove extra newlines
Ensure there's always a space after the dot
Adjust bad inputs, such as:
a. Ò is "
b. Ó is "
c. Õ is '
d. Ð is -
e. Ñ is -- (longdash)
Fix spacing the *
Fix space after )
Fix space after ]
"""
with open(filetxt) as f:
ftxt = f.read()
ftxt=ftxt.replace('\\u25a0', ' * ') # Black dot
ftxt=ftxt.replace('\\u2013', '-')
ftxt=ftxt.replace('\\u2014', '-')
ftxt=ftxt.replace('\\u2019', '\'')
ftxt=ftxt.replace('\\u2018', '\'')
ftxt=ftxt.replace('\\u201c', '\'')
ftxt=ftxt.replace('\\u201d', '\'')
ftxt=ftxt.replace('\\ufb01', 'fi')
ftxt=ftxt.replace('\\ufb02', 'fl')
ftxt=ftxt.replace('\\u0152', 'oe')
ftxt=ftxt.replace('\\u0153', 'oe')
ftxt=ftxt.replace('\\u2022', '-') # Point
ftxt=ftxt.replace('\\u27a4', '->') # Right arrow
ftxt=ftxt.replace('\\u27a5', '->') # Right curved arrow
ftxt=ftxt.replace('\\u2122', '') # TM logo
ftxt=ftxt.replace('\\u20ac', 'euro')
ftxt=ftxt.replace('\\u221e', 'infinity') # Infinity
ftxt=ftxt.replace('\x0c', '') # Up arrow
ftxt=ftxt.replace('VC ', '') # Copyright
# Umlaut related
ftxt=ftxt.replace('\\u20aca', 'a')
ftxt=ftxt.replace('\\u20aco', 'o')
ftxt=ftxt.replace('\\u20acu', 'u')
ftxt=ftxt.replace('\\u2026', '...')
# Slavic
ftxt=ftxt.replace('\\u017', 'z')
ftxt=ftxt.replace('\\u0161', 's')
ftxt=ftxt.replace('\\u0106', 'C')
    ftxt=ftxt.replace(".", ". ") # str.replace is literal, so the plain dot is meant here
ftxt=ftxt.replace("Ò", "\"")
ftxt=ftxt.replace("Ó", "\"")
ftxt=ftxt.replace("Õ", "\'")
ftxt=ftxt.replace("Ð", "-")
ftxt=ftxt.replace("Ñ", "--")
    # Ad hoc cleaning given my corpus
ftxt=ftxt.replace(" eg, ", "") # Messes up fulldata
ftxt=ftxt.replace(" Ltd 2004", "") # Bad input
ftxt=ftxt.replace(" Malden 0214", "") # Bad input
# Spacing
ftxt=ftxt.replace("\n", " ") # Newlines to space
ftxt=ftxt.replace("*", "") # Clean *
ftxt=ftxt.replace(")", ") ") # Space after )
ftxt=ftxt.replace("]", "] ") # Space after ]
ftxt=ftxt.replace(" ;", ";") # No space after ;
ftxt=ftxt.replace(" ,", ",") # No space after ,
ftxt=ftxt.replace(" .", ".") # No space after .
    ftxt=ftxt.replace("  ", " ") # 2 spaces into 1
return ftxt
def preprocess(txt: Text) -> Text:
"""
Clean txt for regex processing using the
cleantxt function.
"""
filename = txt[:-4]
with open(filename +'_prep.txt', 'w') as preptxt:
preptxt.write(cleantxt(txt))
return cleantxt(txt)
def extractbiblio(txt: Text) -> Text:
"""
Apply regex patterns in REGEX_LIST to a
(processed) text. References are stored in
'Author Year' format.
"""
# Initialize the list containing the extracted refs
extractedlist = []
    filename = txt[:-9] # The files passed in end with "_prep.txt"
for regex in REGEX_LIST:
print('Processing ', regex)
# Open the paper as corpus
corpus = open(txt).read()
# Run regexes over corpus
matches = re.findall(regex, corpus)
# Save matches in the extracted list
for match in matches:
# We using capturing groups for author and date
extractedlist.append(str(match[0]) \
+ ' ' + str(match[1]))
# Print statement to have an idea of the search
# feel free to comment it out
print(sorted(set(extractedlist)), len(set(extractedlist)))
# Saving to file
with open(filename + '_biblio.txt', 'w') as f:
        for citation in extractedlist:
f.write(citation + '\n')
return extractedlist
def extract_to_gephi(biblio: Text, papernamewithyear) -> Text:
"""
    Format the data for usage in Gephi. The Gephi table has:
    - a 'paper' column: the paper we extracted references from;
    - a 'references' column: the outcome of our biblio extraction;
    - a 'weight' column: Gephi needs it set to 1.
    The parameters are:
    - biblio: a txt file with the extracted biblio (_biblio.txt
    is the output name of the extractbiblio(txt) function);
    - papernamewithyear: the paper we are extracting the
    references from; given the 'Author year' format we use,
    you have to insert it this way, otherwise your Gephi
    network will not display the references to the paper
    you are processing.
"""
references = pd.read_csv(biblio, sep='\n', header=None,\
names = ['references', 'paper', 'weight'])
df = | pd.DataFrame(references) | pandas.DataFrame |
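# Illustrative sketch of the Gephi edge table described in the docstring above;
# the toy reference list and the column handling are assumptions, not the original code.
import pandas as pd

_refs = pd.DataFrame({'references': ['Smith 2004', 'Jones 2010']})
_refs['paper'] = 'Doe 2012'   # the paper the references were extracted from
_refs['weight'] = 1           # Gephi expects the weight column set to 1
_gephi_edges = _refs[['paper', 'references', 'weight']]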
import streamlit as st
import pandas as pd
import base64
import numpy as np
from PIL import Image
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
# predict
def pred(df_train, df_test):
X_train=df_train.iloc[:,:-1].values
Y_train=df_train.iloc[:,-1].values
X_test= df_test.iloc[:,:-1]
Y_test =df_test.iloc[:,-1]
model=RandomForestRegressor()
model.fit(X_train, Y_train)
pred=model.predict(X_test)
return Y_test, pred
# Calculates performance metrics
def calc_metrics(Y_actual,Y_predicted ):
mse=mean_squared_error(Y_actual,Y_predicted)
rmse=np.sqrt(mse)
r_squared=r2_score(Y_actual,Y_predicted)
mse_series= pd.Series(mse,name='MSE')
rmse_series= pd.Series(rmse,name='RMSE')
r2score_series= pd.Series(r_squared,name='R_squared')
df = pd.concat( [mse_series, rmse_series, r2score_series], axis=1 )
return df
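# Illustrative sketch (the arrays below are assumptions, not app data): calc_metrics
# compares actual and predicted values and returns a one-row frame of MSE, RMSE, R_squared.
_y_actual = np.array([3.0, 2.5, 4.1])
_y_pred = np.array([2.8, 2.7, 4.0])
print(calc_metrics(_y_actual, _y_pred))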
# Load example data
def load_example_data():
df1 = pd.read_csv('vegitation_data_train.csv')
df2=pd.read_csv('vegitation_data_test.csv')
return df1,df2
# Download performance metrics
def filedownload(df,name):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download={name}.csv>Download CSV File</a>'
return href
# Sidebar - Header
st.sidebar.header('Input panel')
st.sidebar.markdown("""
[Example CSV file](https://raw.githubusercontent.com/dataprofessor/model_performance_app/main/Y_example.csv)
""")
# Sidebar panel - Upload input file
uploaded_file = st.sidebar.file_uploader('Upload your train CSV file', type=['csv'])
uploaded_test = st.sidebar.file_uploader('Upload your test CSV file', type=['csv'])
# Sidebar panel - Performance metrics
performance_metrics = ['MSE', 'RMSE', 'R_squared']
selected_metrics = st.sidebar.multiselect('Performance metrics', performance_metrics, performance_metrics)
# Main panel
image = Image.open('logo.png')
st.image(image, width = 500)
st.title('Model Performance Calculator App')
st.markdown("""
This app calculates the model performance metrics given the actual and predicted values.
* **Python libraries:** `base64`, `pandas`, `streamlit`, `scikit-learn`
""")
if uploaded_file is not None and uploaded_test is not None:
train_df = pd.read_csv(uploaded_file)
test_df = | pd.read_csv(uploaded_test) | pandas.read_csv |
import pandas as pd
import mlflow
import click
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def human_readable(value):
if value == ['1']:
return "FAKE NEWS!"
return "REAL NEWS"
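# Illustrative check of the label mapping above; the assumption is that the loaded
# model returns a list of string labels such as ['1'] or ['0'].
print(human_readable(['1']))   # FAKE NEWS!
print(human_readable(['0']))   # REAL NEWS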
def predict(text):
print(f"Accepted payload: {text}")
my_data = {
"text": {0: text},
}
data = pd.DataFrame(data=my_data)
# Load model as a PyFuncModel.
loaded_model = mlflow.pyfunc.load_model('model')
result = loaded_model.predict( | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import datetime
import re
from tqdm import tqdm
# Run the create dataframe and clean data function
file = "../data_scheme_w.csv"
def windows_folder(folder):
"""
    Modify folders from files in a dataframe.
    To be used with pandas .apply()
"""
folder = str(folder).replace("\\", "/")
return folder
def create_dataframe(folder):
"""
Create a dataframe from set files found in folder
This function recursively scan folder for Axona .set files and return
a pandas dataframe with ['filename', 'folder', 'time', 'duration']
columns
Parameters:
folder (str): A folder containing the data
Returns:
dataframe: Pandas dataframe with all set files
"""
set_file = []
root_folder = []
time = []
duration = []
try:
print("Load files ..")
df = | pd.read_csv(file) | pandas.read_csv |
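# Illustrative sketch of the recursive .set-file scan described in the docstring above;
# the os.walk-based body is an assumption about how the truncated function continues.
import os
import pandas as pd

def _scan_set_files(folder):
    rows = []
    for root, _dirs, files in os.walk(folder):
        for name in files:
            if name.endswith('.set'):
                rows.append({'filename': name, 'folder': root})
    return pd.DataFrame(rows, columns=['filename', 'folder'])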
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 11:32:12 2020
@authors: <NAME> and <NAME>
"""
#Import Python core modules
import os
import pandas as pd
import numpy as np
#Import custom modules
import dataextract
import analyzer
path=os.getcwd()
os.chdir(path)
#defining file names
netVehicleDataFile = 'veh0122.xlsx'
evDataFile = 'veh0132.xlsx'
countyDataFile = 'CountyDistricts.xlsx'
la2CountyFile = 'LA2County.xlsx'
#set year and quarter
year = "2021"
Quarter = "Q1"
#Opening files
netVehicleData = dataextract.Dataextract(netVehicleDataFile)
netVehicleData.openfile(sheet=0, sheet_type = 'xlsx')
electricVehicleData = dataextract.Dataextract(evDataFile)
electricVehicleData.openfile(sheet=2,sheet_type = 'xlsx')
countyData = dataextract.Dataextract(countyDataFile)
countyData.openfile(sheet=1,sheet_type = 'xlsx')
la2CountyData = dataextract.Dataextract(la2CountyFile)
la2CountyData.openfile(sheet=1,sheet_type = 'xlsx')
#cleaning Data
netVehicleData.cleanseVehData(countyData = countyData.rawdf)
electricVehicleData.cleanseEVData(countyData = la2CountyData.rawdf)
#analysis and prepping for csv
regions = netVehicleData.rawdf.columns
historic_data_Qs = netVehicleData.rawdf.index
test_matching = electricVehicleData.rawdf.index
if not (test_matching == historic_data_Qs).all():
print("Quarters do not match")
data_to_save_historic = pd.DataFrame()
data_to_save_S_curve = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
def nasa_weather(df):
year = df['YEAR'].astype(str)
month = df['MO'].astype(str)
day = df['DY'].astype(str)
month = month.apply(lambda x: '0'+x if len(x) == 1 else x)
day = day.apply(lambda x: '0'+x if len(x) == 1 else x)
df['date'] = | pd.to_datetime(year + "-" + month + "-" + day) | pandas.to_datetime |
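# Illustrative sketch (toy frame assumed): the zero-padding above makes the
# concatenated year-month-day strings parse cleanly with pandas.to_datetime.
import pandas as pd

_toy = pd.DataFrame({'YEAR': [2021, 2021], 'MO': [1, 11], 'DY': [5, 23]})
_parts = _toy.astype(str)
_parts['MO'] = _parts['MO'].str.zfill(2)
_parts['DY'] = _parts['DY'].str.zfill(2)
_toy['date'] = pd.to_datetime(_parts['YEAR'] + '-' + _parts['MO'] + '-' + _parts['DY'])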
#!/usr/bin/env python3
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 2 04:25:58 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import subprocess as sp
import tempfile
import pandas as pd
#import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import compmem as cm
def load_case(path):
df = pd.read_csv(path, header=None, sep=' ',
names=['date', 'time', 'event', 'skip', 'Model'],
parse_dates=[['date', 'time']])
df = df[['date_time', 'event', 'Model']]
df['timestamp'] = df['date_time']
df = df.drop('date_time', axis=1)
wls = df.pivot_table(values='timestamp', index=['Model'],
columns='event', aggfunc='first').reset_index()
for col in ['Started', 'Queued', 'Finished']:
wls[col] = wls[col].str[:-1]
wls[col] = pd.to_datetime(wls[col])
wls['queuing'] = wls.Started - wls.Queued
wls['JCT'] = wls.Finished - wls.Queued
    # for convenience
wls['No'] = pd.to_numeric(wls['Model'].str.rpartition('.')[2])
return wls
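# Illustrative sketch (toy log assumed) of the pivot used above: one row per model,
# keeping the first timestamp seen for each of the Queued/Started/Finished events.
import pandas as pd

_toy = pd.DataFrame({'Model': ['net.tf.10iter.0'] * 3,
                     'event': ['Queued', 'Started', 'Finished'],
                     'timestamp': pd.to_datetime(['2018-09-02 04:00:00',
                                                  '2018-09-02 04:00:05',
                                                  '2018-09-02 04:01:00'])})
_wide = _toy.pivot_table(values='timestamp', index=['Model'], columns='event',
                         aggfunc='first').reset_index()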
def load_trace(path, fifo=True):
df = pd.read_csv(path)
df = df.sort_values(by='submit_time')
if fifo:
models = defaultdict(dict)
curr = 0
for idx, row in df.iterrows():
if curr < row['submit_time']:
curr = row['submit_time']
models[idx]['Queued'] = row['submit_time']
models[idx]['Started'] = curr
curr += row['duration']
models[idx]['Finished'] = curr
data = [
{
"Model": '{model_name}.tf.{iterations}iter.{job_id}'.format(**df.iloc[idx]),
"Finished": m['Finished'],
"Queued": m['Queued'],
"Started": m['Started'],
"queuing": m['Started'] - m['Queued'],
"JCT": m['Finished'] - m['Queued']
}
for idx, m in models.items()
]
df = pd.DataFrame(data)
else:
data = [
{
"Model": f"{row.model_name}.tf.{row.iterations}iter.{row.job_id}",
"Finished": row.submit_time + row.duration,
"Queued": row.submit_time,
"Started": row.submit_time,
"queuing": 0,
"JCT": row.duration
}
for idx, row in df.iterrows()
]
df = pd.DataFrame(data)
for col in ['Finished', 'Queued', 'Started', 'queuing', 'JCT']:
df[col] = pd.to_timedelta(df[col], unit='s')
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
return df
def load_refine(pathdir):
# load preempt select events
with tempfile.NamedTemporaryFile() as f:
server_output = pathdir/'server.output'
sp.check_call(['grep', 'preempt_select_sess', str(server_output)], stdout=f)
f.flush()
df = cm.load_generic(f.name, event_filters=['preempt_select_sess'])
df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
# convert UTC from server to local
df['timestamp'] = df.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
sess2Model = {}
# model name -> sess handle
ptn = re.compile('Created session with handle (?P<sess>.+)$')
for fpath in pathdir.glob('*.*.*.*.output'):
with fpath.open() as f:
for line in f:
m = ptn.search(line)
if m:
sess2Model[m.group('sess')] = fpath.name.rstrip('.output')
# add model name info to it
df['Model'] = df.Sess.map(sess2Model)
# make sure every session is covered
assert df.Model.isnull().sum() == 0
    # for convenience
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
return df
def load_serverevents(pathdir):
# sess handle -> lane id
with tempfile.NamedTemporaryFile() as f:
server_output = pathdir/'server.output'
sp.check_call(['grep', 'lane_assigned', str(server_output)], stdout=f)
f.flush()
df = cm.load_generic(f.name, event_filters=['lane_assigned'])
df = df.drop(['evt', 'level', 'loc', 'thread', 'type'], axis=1)
# sess handles are unique
assert len(df.Sess.unique()) == len(df.Sess)
# make Sess as index so we can lookup
df = df.set_index('Sess')
# add a new column
df['Model'] = None
# model name -> sess handle
ptn = re.compile('Created session with handle (?P<sess>.+)$')
for fpath in pathdir.glob('*.*.*.*.output'):
with fpath.open() as f:
for line in f:
m = ptn.search(line)
if m:
df.loc[m.group('sess'), 'Model'] = fpath.name.rstrip('.output')
# reset index so we can use that later
df = df.reset_index()
return df
def refine_time_events(df, sevts):
"""Return a copy of df"""
assert df.Model.is_unique
assert sevts.Model.is_unique
df = df.set_index('Model').sort_index()
sevts = sevts.set_index('Model').sort_index()
# check sevts contains all needed info
assert sevts.index.equals(df.index)
# Server logs in UTC, convert to local
sevts['Started'] = sevts.timestamp.dt.tz_localize('UTC').dt.tz_convert('US/Eastern').dt.tz_localize(None)
sevts = sevts.drop(['timestamp'], axis=1)
df['Queued'] = df.Started
df = df.drop(['Started'], axis=1)
    # set Model as index for both and then concat
df = pd.concat([df, sevts], axis=1)
# update queuing
df['queuing'] = df.Started - df.Queued
return df.reset_index()
def plot_timeline(df, colors=None, **kwargs):
ax = kwargs.pop('ax', None)
if ax is None:
ax = plt.gca()
# sort df by no
df['No'] = pd.to_numeric(df['Model'].str.rpartition('.')[2])
df = df.sort_values(by='No')
offset = df.Queued.min()
qmin = (df.Queued - offset) / pd.Timedelta(1, unit='s')
xmin = (df.Started - offset) / pd.Timedelta(1, unit='s')
xmax = (df.Finished - offset) / pd.Timedelta(1, unit='s')
if colors is None:
color_cycle = ax._get_lines.prop_cycler
colors = [next(color_cycle)['color'] for _ in qmin]
for (_, row), q, left, right, color in zip(df.iterrows(), qmin, xmin, xmax, colors):
barheight = 0.8
# queuing time
ax.barh(row.No, left - q, barheight, q, color='#b6b6b6')
# run time
bar = ax.barh(row.No, right - left, barheight, left,
color=color,
label='#{3}: {0}'.format(*row.Model.split('.')))
if 'LaneId' in row:
ax.text(right + 2, row.No, f'Lane {row.LaneId}',
ha='left', va='center', fontsize=3)
# ax.legend()
ax.set_xlabel('Time (s)')
# ax.set_ylabel('Workload')
ax.yaxis.set_ticks([])
return bar, colors
def plot_refine(ax, df, refine_data):
# so that we can access job using no
df = df.set_index('No')
    # for every preempt event pair, mask jobs that are not the left event's switch_to job
offset = df.Queued.min()
refine_data['Ntime'] = (refine_data['timestamp'] - offset) / pd.Timedelta(1, unit='s')
# also convert df.Queued to relative time
df['Started'] = (df.Started - offset) / pd.Timedelta(1, unit='s')
df['Finished'] = (df.Finished - offset) / pd.Timedelta(1, unit='s')
bars = []
# group refine_data by laneId
for laneId, grp in refine_data.groupby('LaneId'):
magic = grp.iterrows()
next(magic)
for (_, left), (_, right) in zip(grp.iterrows(), magic):
for no in df.index.unique():
if no == left.No:
continue
if laneId != df.loc[no].LaneId:
continue
l = max(df.loc[no].Started, left.Ntime)
r = min(df.loc[no].Finished, right.Ntime)
if l >= r:
continue
# make sure left and right within job no's started and finished
# mask from left to right
bars.append(ax.barh(no, r - l, 0.5, l, color='#ffffff', edgecolor='#ffffff'))
return bars
def plot_lanes(refined_df, **kwargs):
lanes = refined_df.groupby(['LaneId', 'LaneSize']).agg({
'Queued': 'first',
'Finished': 'last'
}).rename(columns={'Queued':'Started'}).reset_index()
tables = []
for col in ['Started', 'Finished']:
t = lanes.pivot_table(values='LaneSize', columns='LaneId', index=[col], aggfunc='first')
tables.append(t)
lanes2 = pd.concat(tables).sort_index().interpolate(method='linear', limit_area='inside').fillna(0)
# x
x = (lanes2.index - lanes2.index.min()) / | pd.Timedelta(1, 's') | pandas.Timedelta |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
# df_2018 = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
# df_2016 = pd.read_csv("/mnt/nadavrap-students/STS/data/2016_2017.csv")
# df_2014 = pd.read_csv("/mnt/nadavrap-students/STS/data/2014_2015.csv")
# df_2012 = pd.read_csv("/mnt/nadavrap-students/STS/data/2012_2013.csv")
# df_2010 = pd.read_csv("/mnt/nadavrap-students/STS/data/2010_2011.csv")
#
# print (df_2018.stsrcom.unique())
# print (df_2016.stsrcom.unique())
# print (df_2014.stsrcom.unique())
# print (df_2012.stsrcom.unique())
# print (df_2010.stsrcom.unique())
# print (df_2018.stsrcHospD.unique())
# print (df_2016.stsrcHospD.unique())
# print (df_2014.stsrcHospD.unique())
# print (df_2012.stsrcHospD.unique())
# print (df_2010.stsrcHospD.unique())
# # print (df_2018.columns.tolist())
# df_union = pd.concat([df_2010, df_2012,df_2014,df_2016,df_2018], ignore_index=True)
# print (df_union)
# print (df_union['surgyear'].value_counts())
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
# df_union= pd.read_csv("df_union.csv")
# cols_to_remove = []
# samples = len(df_union)
# for col in df_union.columns:
# nan_vals = df_union[col].isna().sum()
# prec_missing_vals = nan_vals / samples
# print("Column '{}' have :: {} missing values. {}%".format(col, df_union[col].isna().sum(), round(prec_missing_vals,3)))
# print (cols_to_remove)
#
# df_union.drop(cols_to_remove, axis=1, inplace=True)
# print("Number of Features : ",len(df_union.columns))
# for col in df_union.columns:
# print("Column '{}' have :: {} missing values.".format(col,df_union[col].isna().sum()))
#
# df_union.to_csv("df union after remove.csv")
# df_2018_ = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
df_all= pd.read_csv("/tmp/pycharm_project_723/df_union.csv")
print (df_all.reoperation.unique())
print (df_all.stsrcHospD.unique())
print (df_all.stsrcom.unique())
# mask = df_2018_['surgyear'] == 2018
# df_all = df_2018_[mask]
# mask_reop = df_all['reoperation'] == 1
# df_reop = df_all[mask_reop]
# df_op = df_all[~mask_reop]
def create_df_for_bins_hospid(col_mort):
df1 = df_all.groupby(['hospid', 'surgyear'])['hospid'].count().reset_index(name='total')
df2 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 1).sum()).reset_index(
name='Reop')
df3 = df_all.groupby(['hospid', 'surgyear'])['reoperation'].apply(lambda x: (x == 0).sum()).reset_index(
name='FirstOperation')
df_aggr = pd.read_csv("aggregate_csv.csv")
mask_reop = df_all['reoperation'] == 1
df_reop = df_all[mask_reop]
df_op = df_all[~mask_reop]
dfmort = df_all.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_all')
dfmortf = df_op.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_first')
dfmortr = df_reop.groupby(['hospid', 'surgyear'])[col_mort].apply(lambda x: (x == 1).sum()).reset_index(
name='Mortality_reop')
df_comp = df_all.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_all')
df_compr = df_reop.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_reop')
df_compf = df_op.groupby(['hospid', 'surgyear'])['complics'].apply(lambda x: (x == 1).sum()).reset_index(
name='Complics_FirstOperation')
d1 = pd.merge(df1, df3, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d2 = pd.merge(d1, df2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
df5 = pd.merge(df_aggr, d2, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'],
how='inner') # how='left', on=['HospID','surgyear'])
del df5["Unnamed: 0"]
d3 = pd.merge(df5, dfmort, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d4 = pd.merge(d3, dfmortf, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d5 = pd.merge(d4, dfmortr, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer')
d6 = | pd.merge(d5, df_comp, left_on=['hospid', 'surgyear'], right_on=['hospid', 'surgyear'], how='outer') | pandas.merge |
from unittest import TestCase
import definitions
from src.Swell import Swell
from src.SwellDAO import SwellDAO
import pandas as pd
import os
class TestSwellDAO(TestCase):
def test_create_table(self):
swellDAO = SwellDAO()
swellDAO.create_table()
tables = swellDAO.show_tables()
tables_list = [elem[0] for elem in tables]
if ('SwellStaging') not in tables_list:
self.fail()
def test_empty_swell_staging(self):
swellDAO = SwellDAO()
swellDAO.create_table()
swellDAO.drop_table_if_exists()
tables = swellDAO.show_tables()
tables_list = [elem[0] for elem in tables]
if ('SwellStaging') in tables_list:
self.fail()
def test_insert_csv_into_database(self):
swellDAO = SwellDAO()
swellDAO.drop_table_if_exists()
swellDAO.create_table()
test_data_path = os.path.join(definitions.ROOT_DIR, 'testdata/testWaveDataWithHeaders.csv')
swell1 = Swell(test_data_path)
data = | pd.read_csv(test_data_path, encoding='unicode_escape') | pandas.read_csv |
import numpy as np
import pandas as pd
from asset_model import geometric_brownian_motion
#TODO
# class CcpiStrategy(InvestmentStrategy):
#
# def __init__(self, drawdown=None, multiplier=3):
# self.drawdown = drawdown
# self.multiplier = multiplier
#
# def update_portfolio_weighs(self, current_weights, account_value, returns):
# risky_weight = current_weights[0]
# safe_weight = current_weights[1]
# #TODO
# # if self.drawdown is not None:
# # peak = np.maximum(peak, account_value)
# # floor_value = peak * (1 - drawdown)
# cushion = (account_value - floor_value) / account_value
# risky_weight = self.multiplier * cushion
# risky_weight = np.minimum(risky_weight, 1)
# risky_weight = np.maximum(risky_weight, 0)
# safe_weight = 1 - risky_weight
# return np.array(risky_weight, safe_weight)
def backtest_cppi(risky_returns,
safe_returns=None,
risk_free_rate=0.03,
multiplier=3,
cushion_ratio=0.8,
drawdown=None,
start_value=1000):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
:param risky_returns: history of risky returns
:param safe_returns: history of safe returns. If None, default to risk free rate
:param risk_free_rate: Rate of return of the risk free asset
:param multiplier: multiplier to allocate to risky asset = multiplier*(1-cushion)*wealth
:param cushion_ratio: ratio of the wealth to protect
:param drawdown: max drawdown allowed (as ratio)
:param start_value: Initial monetary value of the account
"""
# Ensure returns are a data frame
if isinstance(risky_returns, pd.Series):
risky_returns = pd.DataFrame(risky_returns, columns=["R"])
# If no safe asset is specified, default to the risk free rate
if safe_returns is None:
safe_returns = | pd.DataFrame() | pandas.DataFrame |
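# Illustrative sketch of one CPPI rebalancing step (assumed floor handling, mirroring
# the commented-out CcpiStrategy above, not the truncated implementation):
# risky weight = multiplier * cushion, clipped to [0, 1].
import numpy as np

_account_value, _floor_value, _multiplier = 1000.0, 800.0, 3
_cushion = (_account_value - _floor_value) / _account_value
_risky_weight = float(np.clip(_multiplier * _cushion, 0, 1))
_safe_weight = 1 - _risky_weight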
import pandas as pd
import requests
import os
class extract_as_csv(object):
def extract_f1_json(url__f1_tv, url__drivers):
page = requests.get(url__f1_tv)
json__f1_tv = page.json()
# ergast data
page = requests.get(url__drivers)
json__drivers = page.json()
def db_name():
j_temp = json__f1_tv['path']
j_temp = j_temp[5:]
csv_name = ''
n = 0
while j_temp[n] != '/':
csv_name = csv_name + j_temp[n]
n = 1 + n
return csv_name
csv_name = db_name()
def create_path():
if not os.path.exists('Database/GrandPrix/'+csv_name):
os.makedirs('Database/GrandPrix/'+csv_name)
create_path()
def generate_csv_name(csv_type):
path_db = 'Database/GrandPrix/' + csv_name + '/' + csv_name + '-' + csv_type + '.csv'
return path_db
def for_loop_by_time(json):
Time = []
Something = []
i = 0
for value in json:
if i == 0:
Time.append(value)
i = 1
else:
Something.append(value)
i = 0
return Time, Something
def weather(json__f1_tv):
j_temp = json__f1_tv['Weather']
j_temp = j_temp['graph']
j_temp = j_temp['data']
def temperature(j_temp):
def temp_df(j_temp):
Time, Temp = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Temperature": Temp}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
return Track_Temp_Data
def track_temp(j_temp):
j_temp = j_temp['pTrack']
Track_Temp_Data = temp_df(j_temp)
path_db = generate_csv_name('Track_Temp')
Track_Temp_Data.to_csv(path_db)
def air_temp(j_temp):
j_temp = j_temp['pAir']
Track_Temp_Data = temp_df(j_temp)
path_db = generate_csv_name('Air_Temp')
Track_Temp_Data.to_csv(path_db)
track_temp(j_temp)
air_temp(j_temp)
def is_raining(j_temp):
j_temp = j_temp['pRaining']
Time, Raining = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Raining": Raining}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
path_db = generate_csv_name('Raining')
Track_Temp_Data.to_csv(path_db)
def wind_speed(j_temp):
j_temp = j_temp['pWind Speed']
Time, Wind_Speed = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Wind Speed": Wind_Speed}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
path_db = generate_csv_name('Wind_Speed')
Track_Temp_Data.to_csv(path_db)
def humidity(j_temp):
j_temp = j_temp['pHumidity']
Time, Humidity = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Humidity": Humidity}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
path_db = generate_csv_name('Humidity')
Track_Temp_Data.to_csv(path_db)
def air_pressure(j_temp):
j_temp = j_temp['pPressure']
Time, Air_Pressure = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Air Pressure": Air_Pressure}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
path_db = generate_csv_name('Air_Pressure')
Track_Temp_Data.to_csv(path_db)
def wind_direction(j_temp):
j_temp = j_temp['pWind Dir']
Time, Wind_Direction = for_loop_by_time(j_temp)
Track_Temp = {"Time": Time, "Wind Direction": Wind_Direction}
Track_Temp_Data = pd.DataFrame(data=Track_Temp)
Track_Temp_Data = Track_Temp_Data.set_index('Time')
path_db = generate_csv_name('Wind_Direction')
Track_Temp_Data.to_csv(path_db)
temperature(j_temp)
is_raining(j_temp)
wind_speed(j_temp)
humidity(j_temp)
air_pressure(j_temp)
wind_direction(j_temp)
def current_drivers(json__f1_tv):
j_temp = json__f1_tv['init']
j_temp = j_temp['data']
j_temp = j_temp['Drivers']
Driver_Name = []
Driver_Initials = []
Driver_Team = []
Driver_Num = []
Team_Color_Picker = []
for Driver in j_temp:
Driver_Name.append(Driver['FirstName'] + ' ' + Driver['Name'])
Driver_Initials.append(Driver['Initials'])
Driver_Team.append(Driver['Team'])
Driver_Num.append(Driver['Num'])
Team_Color_Picker.append(Driver['Color'])
Current_Drivers = {"Number": Driver_Num, "Driver Name": Driver_Name, "Driver Initials": Driver_Initials, "Driver Team": Driver_Team, "Color": Team_Color_Picker}
Current_Drivers_Data = pd.DataFrame(data=Current_Drivers)
Current_Drivers_Data = Current_Drivers_Data.set_index('Number')
path_db = generate_csv_name('Drivers')
Current_Drivers_Data.to_csv(path_db)
return Driver_Initials
def count_laps(json__f1_tv):
j_temp = json__f1_tv['free']
j_temp = j_temp['data']
Lap = []
i = 1
while i <= j_temp['L']:
Lap.append(i)
i = i + 1
return Lap
def track_status(json__f1_tv, Laps):
j_temp = json__f1_tv['Scores']
j_temp = j_temp['graph']
j_temp = j_temp['TrackStatus']
Track_Status = []
i = 0
for lap in j_temp:
if i == 1:
if lap == '':
lap = None
Track_Status.append(lap)
i = i - 1
else:
i = i + 1
            Track_Status_Dict = {"Lap": Laps, "Status": Track_Status[1:len(Laps)+1]}
Track_Status_Data = pd.DataFrame(data=Track_Status_Dict)
Track_Status_Data = Track_Status_Data.set_index('Lap')
path_db = generate_csv_name('Track_Status')
Track_Status_Data.to_csv(path_db)
def drivers_performance_points(json__f1_tv, Driver_Initials, Laps):
j_temp = json__f1_tv['Scores']
j_temp = j_temp['graph']
j_temp = j_temp['Performance']
Driver_Performance = {}
Driver_Performance['Lap'] = Laps
for Driver in Driver_Initials:
i = 0
Performance_Gap = []
for Performance in j_temp['p'+Driver]:
if i == 0:
i = i + 1
else:
Performance_Gap.append(Performance)
i = i - 1
                while len(Performance_Gap) < len(Laps):
Performance_Gap.append(None)
Driver_Performance[Driver] = Performance_Gap
Driver_Performance_Data = pd.DataFrame(data=Driver_Performance)
Driver_Performance_Data = Driver_Performance_Data.set_index('Lap')
path_db = generate_csv_name('Drivers_Performance')
Driver_Performance_Data.to_csv(path_db)
def race_result(json__f1_tv, Driver_Initials):
j_temp = json__f1_tv['opt']
j_temp = j_temp['data']
j_temp = j_temp['DR']
Race_Result = {}
i = 0
for item in j_temp:
for position in item['OC']:
Race_Result[Driver_Initials[i]] = position
i = i + 1
return Race_Result
def best__(json__f1_tv, Driver_Initials):
j_temp = json__f1_tv['best']
j_temp = j_temp['data']
j_temp = j_temp['DR']
Lap_Times = []
Lap = []
Rank = []
Sector_1 = []
Position_Sector_1 = []
Sector_2 = []
Position_Sector_2 = []
Sector_3 = []
Position_Sector_3 = []
Highest_Speed_Sector_1 = []
Position_Speed_Sector_1 = []
Highest_Speed_Sector_2 = []
Position_Speed_Sector_2 = []
Highest_Speed_Sector_3 = []
Position_Speed_Sector_3 = []
SpeedTrap = []
Position_SpeedTrap = []
for item in j_temp:
i = 0
for content in item['B']:
if i == 1:
Lap_Times.append(content)
elif i == 2:
Lap.append(content)
elif i == 3:
Rank.append(content)
elif i == 4:
Sector_1.append(content)
elif i == 6:
Position_Sector_1.append(content)
elif i == 7:
Sector_2.append(content)
elif i == 9:
Position_Sector_2.append(content)
elif i == 10:
Sector_3.append(content)
elif i == 12:
Position_Sector_3.append(content)
elif i == 13:
Highest_Speed_Sector_1.append(content)
elif i == 15:
Position_Speed_Sector_1.append(content)
elif i == 16:
Highest_Speed_Sector_2.append(content)
elif i == 18:
Position_Speed_Sector_2.append(content)
elif i == 19:
Highest_Speed_Sector_3.append(content)
elif i == 21:
Position_Speed_Sector_3.append(content)
i = i + 1
DataFrame = {'Ranking': Rank,
'Driver': Driver_Initials,
'Corresponding Lap': Lap,
'Lap Time': Lap_Times}
DataFrame = | pd.DataFrame(data=DataFrame) | pandas.DataFrame |
# General Packages
from math import atan2, degrees
from datetime import datetime
from pathlib import Path
import time
import pprint
import numpy as np
import pandas as pd
import pickle
# Plotting
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.dates import date2num
import seaborn as sns
# Scaling
from sklearn.preprocessing import StandardScaler
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# Paths
repo_dir = Path(__file__).absolute().parent.parent
paper_dir = repo_dir / 'paper/' # directory containing paper related info
data_dir = paper_dir / 'data/' # directory containing data files
results_dir = paper_dir / 'results/' # directory containing results
# create directories that don't exist
for d in [data_dir, results_dir]:
d.mkdir(exist_ok = True)
# Formatting Options
np.set_printoptions(precision = 4, suppress = False)
| pd.set_option('display.max_columns', 30) | pandas.set_option |
import os
from collections import Counter
from os import listdir
from os.path import isfile, join
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import figure
from matplotlib import style
style.use('ggplot')
import scipy
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm, ticker
import numpy as np
from sys import argv
import Orange
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import colors as mcolors, cm
from matplotlib.collections import PolyCollection
from classifiers import classifiers_list
from datasetsDelaunay import dataset_list_bi, dataset_list_mult
from folders import output_dir, dir_pca_biclasse, metricas_biclasse, dir_pca_multiclasse, metricas_multiclasse
from parameters import order, alphas
order_dict = {'area': 1,
'volume': 2,
'area_volume_ratio': 3,
'edge_ratio': 4,
'radius_ratio': 5,
'aspect_ratio': 6,
'max_solid_angle': 7,
'min_solid_angle': 8,
'solid_angle': 9}
class Statistics:
def __init__(self):
pass
def compute_CD_customizado(self, avranks, n, alpha="0.05", test="nemenyi"):
"""
Returns critical difference for Nemenyi or Bonferroni-Dunn test
according to given alpha (either alpha="0.05" or alpha="0.1") for average
ranks and number of tested datasets N. Test can be either "nemenyi" for
        the Nemenyi two-tailed test or "bonferroni-dunn" for the Bonferroni-Dunn test.
"""
k = len(avranks)
d = {("nemenyi", "0.05"): [1.960, 2.344, 2.569, 2.728, 2.850, 2.948, 3.031, 3.102, 3.164, 3.219, 3.268, 3.313,
3.354, 3.391, 3.426,
3.458, 3.489, 3.517, 3.544, 3.569, 3.593, 3.616, 3.637, 3.658, 3.678, 3.696, 3.714,
3.732, 3.749, 3.765,
3.780, 3.795, 3.810, 3.824, 3.837, 3.850, 3.863, 3.876, 3.888, 3.899, 3.911, 3.922,
3.933, 3.943, 3.954,
3.964, 3.973, 3.983, 3.992],
("nemenyi", "0.1"): [0, 0, 1.644854, 2.052293, 2.291341, 2.459516,
2.588521, 2.692732, 2.779884, 2.854606, 2.919889,
2.977768, 3.029694, 3.076733, 3.119693, 3.159199,
3.195743, 3.229723, 3.261461, 3.291224, 3.319233],
("bonferroni-dunn", "0.05"): [0, 0, 1.960, 2.241, 2.394, 2.498, 2.576,
2.638, 2.690, 2.724, 2.773],
("bonferroni-dunn", "0.1"): [0, 0, 1.645, 1.960, 2.128, 2.241, 2.326,
2.394, 2.450, 2.498, 2.539]}
q = d[(test, alpha)]
cd = q[k] * (k * (k + 1) / (6.0 * n)) ** 0.5
return cd
def calcula_media_folds_biclasse(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_biclasse_' + mode + '.csv', index=False)
def calcula_media_folds_multiclass(self, df):
t = pd.Series(data=np.arange(0, df.shape[0], 1))
dfr = pd.DataFrame(
columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER', 'ALPHA', 'PRE', 'REC', 'SPE', 'F1', 'GEO',
'IBA', 'AUC'],
index=np.arange(0, int(t.shape[0] / 5)))
df_temp = df.groupby(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = dfr.index.values
i = idx[0]
for name, group in df_temp:
group = group.reset_index()
dfr.at[i, 'MODE'] = group.loc[0, 'MODE']
mode = group.loc[0, 'MODE']
dfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
dfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
dfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
dfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
dfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
dfr.at[i, 'PRE'] = group['PRE'].mean()
dfr.at[i, 'REC'] = group['REC'].mean()
dfr.at[i, 'SPE'] = group['SPE'].mean()
dfr.at[i, 'F1'] = group['F1'].mean()
dfr.at[i, 'GEO'] = group['GEO'].mean()
dfr.at[i, 'IBA'] = group['IBA'].mean()
dfr.at[i, 'AUC'] = group['AUC'].mean()
i = i + 1
print(i)
dfr.to_csv(output_dir + 'resultado_media_multiclass_' + mode + '.csv', index=False)
def separa_delaunay_biclass(self, filename):
df = pd.read_csv(filename)
list_base = []
for p in np.arange(0, len(preproc_type)):
list_base.append(df[(df['PREPROC'] == preproc_type[p])])
df_base = list_base.pop(0)
for i in np.arange(0, len(list_base)):
df_base = pd.concat([df_base, list_base[i]], ignore_index=True)
for o in order:
for a in alphas:
dfr = df[(df['ORDER'] == o)]
dfr1 = dfr[(dfr['ALPHA'] == str(a))]
df_file = pd.concat([df_base, dfr1], ignore_index=True)
df_file.to_csv('./../output_dir/result_biclass' + '_' + o + '_' + str(a) + '.csv', index=False)
def read_dir_files(self, dir_name):
f = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
return f
def find_best_rank(self, results_dir, tipo):
results = self.read_dir_files(results_dir)
        df = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
i = 0
for f in results:
df_temp = pd.read_csv(results_dir + f)
df.at[i, 'ARQUIVO'] = f
df.at[i, 'WINER'] = df_temp.iloc[0, 0]
i += 1
df.to_csv(output_dir + tipo)
def find_best_delaunay(self, results_dir, tipo):
df = pd.read_csv(results_dir + tipo)
i = 0
j = 0
df_best = pd.DataFrame(columns=['ARQUIVO', 'WINER'])
win = list(df['WINER'])
for w in win:
if w == 'DELAUNAY':
df_best.at[i, 'ARQUIVO'] = df.iloc[j, 1]
df_best.at[i, 'WINER'] = df.iloc[j, 2]
i += 1
j += 1
df_best.to_csv(output_dir + 'only_best_delaunay_pca_biclass_media_rank.csv')
def rank_by_algorithm(self, df, tipo, wd, reducao, order, alpha):
'''
        Computes the rank
:param df:
:param tipo:
:param wd:
:param delaunay_type:
:return:
'''
df_tabela = pd.DataFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE',
'DELAUNAY', 'RANK_DELAUNAY', 'DELAUNAY_TYPE', 'ALPHA', 'unit'])
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df.to_csv(dir_pca_biclasse + reducao + '_' + tipo + '_' + order + '_' + str(alpha) + '.csv')
j = 0
for d in dataset_list_bi:
for m in metricas_biclasse:
aux = group[group['DATASET'] == d]
aux = aux.reset_index()
df_tabela.at[j, 'DATASET'] = d
df_tabela.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.tolist()[0]
df_tabela.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.tolist()[0]
df_tabela.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.tolist()[0]
df_tabela.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.tolist()[0]
df_tabela.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.tolist()[0]
df_tabela.at[j, 'DELAUNAY'] = aux.at[indice, m]
df_tabela.at[j, 'DELAUNAY_TYPE'] = order
df_tabela.at[j, 'ALPHA'] = alpha
df_tabela.at[j, 'unit'] = m
j += 1
df_pre = df_tabela[df_tabela['unit'] == 'PRE']
df_rec = df_tabela[df_tabela['unit'] == 'REC']
df_spe = df_tabela[df_tabela['unit'] == 'SPE']
df_f1 = df_tabela[df_tabela['unit'] == 'F1']
df_geo = df_tabela[df_tabela['unit'] == 'GEO']
df_iba = df_tabela[df_tabela['unit'] == 'IBA']
df_auc = df_tabela[df_tabela['unit'] == 'AUC']
pre = df_pre[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
rec = df_rec[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
spe = df_spe[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
f1 = df_f1[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
geo = df_geo[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
iba = df_iba[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
auc = df_auc[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DELAUNAY']]
pre = pre.reset_index()
pre.drop('index', axis=1, inplace=True)
rec = rec.reset_index()
rec.drop('index', axis=1, inplace=True)
spe = spe.reset_index()
spe.drop('index', axis=1, inplace=True)
f1 = f1.reset_index()
f1.drop('index', axis=1, inplace=True)
geo = geo.reset_index()
geo.drop('index', axis=1, inplace=True)
iba = iba.reset_index()
iba.drop('index', axis=1, inplace=True)
auc = auc.reset_index()
auc.drop('index', axis=1, inplace=True)
            # compute the rank row by row
pre_rank = pre.rank(axis=1, ascending=False)
rec_rank = rec.rank(axis=1, ascending=False)
spe_rank = spe.rank(axis=1, ascending=False)
f1_rank = f1.rank(axis=1, ascending=False)
geo_rank = geo.rank(axis=1, ascending=False)
iba_rank = iba.rank(axis=1, ascending=False)
auc_rank = auc.rank(axis=1, ascending=False)
df_pre = df_pre.reset_index()
df_pre.drop('index', axis=1, inplace=True)
df_pre['RANK_ORIGINAL'] = pre_rank['ORIGINAL']
df_pre['RANK_SMOTE'] = pre_rank['SMOTE']
df_pre['RANK_SMOTE_SVM'] = pre_rank['SMOTE_SVM']
df_pre['RANK_BORDERLINE1'] = pre_rank['BORDERLINE1']
df_pre['RANK_BORDERLINE2'] = pre_rank['BORDERLINE2']
df_pre['RANK_GEOMETRIC_SMOTE'] = pre_rank['GEOMETRIC_SMOTE']
df_pre['RANK_DELAUNAY'] = pre_rank['DELAUNAY']
df_rec = df_rec.reset_index()
df_rec.drop('index', axis=1, inplace=True)
df_rec['RANK_ORIGINAL'] = rec_rank['ORIGINAL']
df_rec['RANK_SMOTE'] = rec_rank['SMOTE']
df_rec['RANK_SMOTE_SVM'] = rec_rank['SMOTE_SVM']
df_rec['RANK_BORDERLINE1'] = rec_rank['BORDERLINE1']
df_rec['RANK_BORDERLINE2'] = rec_rank['BORDERLINE2']
df_rec['RANK_GEOMETRIC_SMOTE'] = rec_rank['GEOMETRIC_SMOTE']
df_rec['RANK_DELAUNAY'] = rec_rank['DELAUNAY']
df_spe = df_spe.reset_index()
df_spe.drop('index', axis=1, inplace=True)
df_spe['RANK_ORIGINAL'] = spe_rank['ORIGINAL']
df_spe['RANK_SMOTE'] = spe_rank['SMOTE']
df_spe['RANK_SMOTE_SVM'] = spe_rank['SMOTE_SVM']
df_spe['RANK_BORDERLINE1'] = spe_rank['BORDERLINE1']
df_spe['RANK_BORDERLINE2'] = spe_rank['BORDERLINE2']
df_spe['RANK_GEOMETRIC_SMOTE'] = spe_rank['GEOMETRIC_SMOTE']
df_spe['RANK_DELAUNAY'] = spe_rank['DELAUNAY']
df_f1 = df_f1.reset_index()
df_f1.drop('index', axis=1, inplace=True)
df_f1['RANK_ORIGINAL'] = f1_rank['ORIGINAL']
df_f1['RANK_SMOTE'] = f1_rank['SMOTE']
df_f1['RANK_SMOTE_SVM'] = f1_rank['SMOTE_SVM']
df_f1['RANK_BORDERLINE1'] = f1_rank['BORDERLINE1']
df_f1['RANK_BORDERLINE2'] = f1_rank['BORDERLINE2']
df_f1['RANK_GEOMETRIC_SMOTE'] = f1_rank['GEOMETRIC_SMOTE']
df_f1['RANK_DELAUNAY'] = f1_rank['DELAUNAY']
df_geo = df_geo.reset_index()
df_geo.drop('index', axis=1, inplace=True)
df_geo['RANK_ORIGINAL'] = geo_rank['ORIGINAL']
df_geo['RANK_SMOTE'] = geo_rank['SMOTE']
df_geo['RANK_SMOTE_SVM'] = geo_rank['SMOTE_SVM']
df_geo['RANK_BORDERLINE1'] = geo_rank['BORDERLINE1']
df_geo['RANK_BORDERLINE2'] = geo_rank['BORDERLINE2']
df_geo['RANK_GEOMETRIC_SMOTE'] = geo_rank['GEOMETRIC_SMOTE']
df_geo['RANK_DELAUNAY'] = geo_rank['DELAUNAY']
df_iba = df_iba.reset_index()
df_iba.drop('index', axis=1, inplace=True)
df_iba['RANK_ORIGINAL'] = iba_rank['ORIGINAL']
df_iba['RANK_SMOTE'] = iba_rank['SMOTE']
df_iba['RANK_SMOTE_SVM'] = iba_rank['SMOTE_SVM']
df_iba['RANK_BORDERLINE1'] = iba_rank['BORDERLINE1']
df_iba['RANK_BORDERLINE2'] = iba_rank['BORDERLINE2']
df_iba['RANK_GEOMETRIC_SMOTE'] = iba_rank['GEOMETRIC_SMOTE']
df_iba['RANK_DELAUNAY'] = iba_rank['DELAUNAY']
df_auc = df_auc.reset_index()
df_auc.drop('index', axis=1, inplace=True)
df_auc['RANK_ORIGINAL'] = auc_rank['ORIGINAL']
df_auc['RANK_SMOTE'] = auc_rank['SMOTE']
df_auc['RANK_SMOTE_SVM'] = auc_rank['SMOTE_SVM']
df_auc['RANK_BORDERLINE1'] = auc_rank['BORDERLINE1']
df_auc['RANK_BORDERLINE2'] = auc_rank['BORDERLINE2']
df_auc['RANK_GEOMETRIC_SMOTE'] = auc_rank['GEOMETRIC_SMOTE']
df_auc['RANK_DELAUNAY'] = auc_rank['DELAUNAY']
            # average rank
media_pre_rank = pre_rank.mean(axis=0)
media_rec_rank = rec_rank.mean(axis=0)
media_spe_rank = spe_rank.mean(axis=0)
media_f1_rank = f1_rank.mean(axis=0)
media_geo_rank = geo_rank.mean(axis=0)
media_iba_rank = iba_rank.mean(axis=0)
media_auc_rank = auc_rank.mean(axis=0)
media_pre_rank_file = media_pre_rank.reset_index()
media_pre_rank_file = media_pre_rank_file.sort_values(by=0)
media_rec_rank_file = media_rec_rank.reset_index()
media_rec_rank_file = media_rec_rank_file.sort_values(by=0)
media_spe_rank_file = media_spe_rank.reset_index()
media_spe_rank_file = media_spe_rank_file.sort_values(by=0)
media_f1_rank_file = media_f1_rank.reset_index()
media_f1_rank_file = media_f1_rank_file.sort_values(by=0)
media_geo_rank_file = media_geo_rank.reset_index()
media_geo_rank_file = media_geo_rank_file.sort_values(by=0)
media_iba_rank_file = media_iba_rank.reset_index()
media_iba_rank_file = media_iba_rank_file.sort_values(by=0)
media_auc_rank_file = media_auc_rank.reset_index()
media_auc_rank_file = media_auc_rank_file.sort_values(by=0)
            # Save the important output files
df_pre.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
df_rec.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
df_spe.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
df_f1.to_csv(wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
df_geo.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
df_iba.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
df_auc.to_csv(
wd + 'total_rank/' + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
media_pre_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv',
index=False)
media_rec_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv',
index=False)
media_spe_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv',
index=False)
media_f1_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv',
index=False)
media_geo_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv',
index=False)
media_iba_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv',
index=False)
media_auc_rank_file.to_csv(
wd + 'media_rank/' + reducao + '_media_rank_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv',
index=False)
delaunay_type = order + '_' + str(alpha)
            # CD plot
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
delaunay_type]
avranks = list(media_pre_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_pre.pdf')
plt.close()
avranks = list(media_rec_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_rec.pdf')
plt.close()
avranks = list(media_spe_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_spe.pdf')
plt.close()
avranks = list(media_f1_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_f1.pdf')
plt.close()
avranks = list(media_geo_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_geo.pdf')
plt.close()
avranks = list(media_iba_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_iba.pdf')
plt.close()
'''avranks = list(media_auc_rank)
cd = Orange.evaluation.compute_CD(avranks, len(dataset_list_bi))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
wd + 'figurasCD/' + 'cd_' + reducao + '_' + tipo + '_' + delaunay_type + '_' + name + '_auc.pdf')
plt.close()'''
print('Delaunay Type= ', delaunay_type)
print('Algorithm= ', name)
def rank_total_by_algorithm(self, tipo, wd, reducao, order, alpha):
delaunay_name = 'RANK_DTO_' + str(order) + '_' + str(alpha)
cols = ['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1',
'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
for name in classifiers_list:
print(os.path.abspath(os.getcwd()))
            # Save the important output files
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_pre.csv'
df_pre = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_rec.csv'
df_rec = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_spe.csv'
df_spe = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_f1.csv'
df_f1 = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_geo.csv'
df_geo = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_iba.csv'
df_iba = pd.read_csv(path_name)
path_name = wd + reducao + '_total_rank_' + tipo + '_' + order + '_' + str(alpha) + '_' + name + '_auc.csv'
df_auc = pd.read_csv(path_name)
# PRE
df_pre_col = df_pre[cols]
df_pre_col.loc[:, delaunay_name] = df_pre_col['RANK_DELAUNAY'].values
df_pre_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_pre = df_pre_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_pre.csv'
ranking_pre['ALGORITHM'] = name
ranking_pre.to_csv(path_name, index=False)
# REC
df_rec_col = df_rec[cols]
df_rec_col.loc[:, delaunay_name] = df_rec_col['RANK_DELAUNAY'].values
df_rec_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_rec = df_rec_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_rec.csv'
ranking_rec['ALGORITHM'] = name
ranking_rec.to_csv(path_name, index=False)
# SPE
df_spe_col = df_spe[cols]
df_spe_col.loc[:, delaunay_name] = df_spe_col['RANK_DELAUNAY'].values
df_spe_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_spe = df_spe_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_spe.csv'
ranking_spe['ALGORITHM'] = name
ranking_spe.to_csv(path_name, index=False)
# F1
df_f1_col = df_f1[cols]
df_f1_col.loc[:, delaunay_name] = df_f1_col['RANK_DELAUNAY'].values
df_f1_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_f1 = df_f1_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_f1.csv'
ranking_f1['ALGORITHM'] = name
ranking_f1.to_csv(path_name, index=False)
# GEO
df_geo_col = df_geo[cols]
df_geo_col.loc[:, delaunay_name] = df_geo_col['RANK_DELAUNAY'].values
df_geo_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_geo = df_geo_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_geo.csv'
ranking_geo['ALGORITHM'] = name
ranking_geo.to_csv(path_name, index=False)
# IBA
df_iba_col = df_iba[cols]
df_iba_col.loc[:, delaunay_name] = df_iba_col['RANK_DELAUNAY'].values
df_iba_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_iba = df_iba_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_iba.csv'
ranking_iba['ALGORITHM'] = name
ranking_iba.to_csv(path_name, index=False)
# AUC
df_auc_col = df_auc[cols]
df_auc_col.loc[:, delaunay_name] = df_auc_col['RANK_DELAUNAY'].values
df_auc_col.drop(['RANK_DELAUNAY'], axis=1, inplace=True)
ranking_auc = df_auc_col.groupby(['ALGORITHM']).mean()
path_name = wd + reducao + '_rank_by_algorithm_' + tipo + '_' + order + '_' + str(
alpha) + '_' + name + '_auc.csv'
ranking_auc['ALGORITHM'] = name
ranking_auc.to_csv(path_name, index=False)
def rank_by_algorithm_dataset(self, filename):
df = pd.read_csv(filename)
df_temp = df.groupby(by=['ALGORITHM'])
for name, group in df_temp:
group = group.reset_index()
group.drop('index', axis=1, inplace=True)
df_temp1 = group.groupby(by=['DATASET'])
for name1, group1 in df_temp1:
group1 = group1.reset_index()
group1.drop('index', axis=1, inplace=True)
group1['rank_f1'] = group1['F1'].rank(ascending=False)
group1['rank_geo'] = group1['GEO'].rank(ascending=False)
group1['rank_iba'] = group1['IBA'].rank(ascending=False)
group1['rank_auc'] = group1['AUC'].rank(ascending=False)
group1.to_csv('./../output_dir/rank/rank_algorithm_dataset_' + name + '_' + name1 + '.csv', index=False)
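# Note: pandas rank(ascending=False) assigns rank 1 to the largest value, so a
# lower rank in the columns above means a better F1/GEO/IBA/AUC score.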
def rank_by_algorithm_dataset_only_dto(self, filename):
df = pd.read_csv(filename)
#
# Copyright (c) 2015 - 2022, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
GEOPM IO - Helper module for parsing/processing report and trace files.
"""
from __future__ import absolute_import
from __future__ import division
from builtins import str
from collections import OrderedDict
import os
import json
import re
import pandas
import numpy
import glob
import sys
import subprocess
import psutil
import copy
import yaml
import io
import hashlib
from distutils.spawn import find_executable
from natsort import natsorted
from . import __version__
from . import update_report
try:
_, os.environ['COLUMNS'] = subprocess.check_output(['stty', 'size']).decode().split()
except subprocess.CalledProcessError:
os.environ['COLUMNS'] = "200"
pandas.set_option('display.width', int(os.environ['COLUMNS']))
pandas.set_option('display.max_colwidth', 80)
pandas.set_option('max_columns', 100)
class AppOutput(object):
"""The container class for all trace related data.
This class holds the relevant objects for parsing and indexing all
data that is output from GEOPM. This object can be created with a
a trace glob string that will be used
to search dir_name for the relevant files. If files are found
their data will be parsed into objects for easy data access.
Additionally a Pandas DataFrame is constructed containing all of
all of the
trace data. These DataFrames are indexed based on the version of
GEOPM found in the files, the profile name, agent name, and the number
of times that particular configuration has been seen by the parser
(i.e. experiment iteration).
Attributes:
trace_glob: The string pattern to use to search for trace files.
dir_name: The directory path to use when searching for files.
verbose: A bool to control whether verbose output is printed to stdout.
"""
def __init__(self, traces=None, dir_name='.', verbose=False, do_cache=True):
self._traces = {}
self._traces_df = pandas.DataFrame()
self._all_paths = []
self._index_tracker = IndexTracker()
self._node_names = None
self._region_names = None
if traces:
if type(traces) is list:
trace_paths = [os.path.join(dir_name, path) for path in traces]
else:
trace_glob = os.path.join(dir_name, traces)
try:
trace_paths = glob.glob(trace_glob)
except TypeError:
raise TypeError('<geopm> geopmpy.io: AppOutput: traces must be a list of paths or a glob pattern')
trace_paths = natsorted(trace_paths)
if len(trace_paths) == 0:
raise RuntimeError('<geopm> geopmpy.io: No trace files found with pattern {}.'.format(trace_glob))
self._all_paths.extend(trace_paths)
self._index_tracker.reset()
if do_cache:
# unique cache name based on trace files in this list
paths_str = str(trace_paths)
try:
h5_id = hashlib.shake_256(paths_str.encode()).hexdigest(14)
except AttributeError:
h5_id = hash(paths_str)
trace_h5_name = 'trace_{}.h5'.format(h5_id)
self._all_paths.append(trace_h5_name)
# check if cache is older than traces
if os.path.exists(trace_h5_name):
cache_mod_time = os.path.getmtime(trace_h5_name)
regen_cache = False
for trace_file in trace_paths:
mod_time = os.path.getmtime(trace_file)
if mod_time > cache_mod_time:
regen_cache = True
if regen_cache:
os.remove(trace_h5_name)
try:
self._traces_df = pandas.read_hdf(trace_h5_name, 'trace')
if verbose:
sys.stdout.write('Loaded traces from {}.\n'.format(trace_h5_name))
except IOError as err:
sys.stderr.write('Warning: <geopm> geopmpy.io: Trace HDF5 file not detected or older than traces. Data will be saved to {}.\n'
.format(trace_h5_name))
self.parse_traces(trace_paths, verbose)
# Cache traces dataframe
try:
if verbose:
sys.stdout.write('Generating HDF5 files... ')
self._traces_df.to_hdf(trace_h5_name, 'trace')
except ImportError as error:
sys.stderr.write('Warning: <geopm> geopmpy.io: Unable to write HDF5 file: {}\n'.format(str(error)))
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
else:
self.parse_traces(trace_paths, verbose)
def parse_traces(self, trace_paths, verbose):
traces_df_list = []
fileno = 1
filesize = 0
for tp in trace_paths: # Get size of all trace files
filesize += os.stat(tp).st_size
# Abort if traces are too large
avail_mem = psutil.virtual_memory().available
if filesize > avail_mem // 2:
sys.stderr.write('Warning: <geopm> geopmpy.io: Total size of traces is greater than 50% of available memory. Parsing traces will be skipped.\n')
return
filesize = '{}MiB'.format(filesize // 1024 // 1024)
for tp in trace_paths:
if verbose:
sys.stdout.write('\rParsing trace file {} of {} ({})... '.format(fileno, len(trace_paths), filesize))
sys.stdout.flush()
fileno += 1
tt = Trace(tp)
self.add_trace_df(tt, traces_df_list) # Handles multiple traces per node
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
if verbose:
sys.stdout.write('Creating combined traces DF... ')
sys.stdout.flush()
self._traces_df = pandas.concat(traces_df_list)
self._traces_df = self._traces_df.sort_index(ascending=True)
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
def remove_files(self):
"""Deletes all files currently tracked by this object."""
for ff in self._all_paths:
try:
os.remove(ff)
except OSError:
pass
def add_trace_df(self, tt, traces_df_list):
"""Adds a trace DataFrame to the tracking list.
The report tracking list is used to create the combined
DataFrame once all reports are parsed.
Args:
tt: The Trace object used to extract the Trace DataFrame.
This DataFrame will be indexed and added to the
tracking list.
"""
tdf = tt.get_df() # TODO: this needs numeric cols optimization
tdf = tdf.set_index(self._index_tracker.get_multiindex(tt))
traces_df_list.append(tdf)
def get_trace_data(self, node_name=None):
idx = pandas.IndexSlice
df = self._traces_df
if node_name is not None:
df = df.loc[idx[:, :, :, :, node_name, :, :], ]
return df
def get_trace_df(self):
"""Getter for the combined DataFrame of all trace files parsed.
This DataFrame contains all data parsed, and has a complex
MultiIndex for accessing the unique data from each individual
trace. For more information on this index, see the
IndexTracker docstring.
Returns:
pandas.DataFrame: Contains all parsed data.
"""
return self._traces_df
class IndexTracker(object):
"""Tracks and uniquely identifies experiment configurations for
DataFrame indexing.
This object's purpose is to examine parsed data for reports or
traces and determine if a particular experiment configuration has
already been tracked. A user may run the same configuration
repeatedly in order to prove that results are repeatable and are
not outliers. Since the same configuration is used many times, it
must be tracked and counted to ensure that the unique data for
each run can be extracted later.
The parsed data is used to extract the following fields to build
the tracking index tuple:
(<GEOPM_VERSION>, <PROFILE_NAME>, <AGENT_NAME>, <NODE_NAME>)
If the tuple not contained in the _run_outputs dict, it is
inserted with a value of 1. The value is incremented if the tuple
is currently in the _run_outputs dict. This value is used to
uniquely identify a particular set of parsed data when the
MultiIndex is created.
"""
def __init__(self):
self._run_outputs = {}
def _check_increment(self, run_output):
"""Extracts the index tuple from the parsed data and tracks it.
Checks to see if the current run_output has been seen before.
If so, the count is incremented. Otherwise it is stored as 1.
Args:
run_output: The Trace object to be tracked.
"""
index = (run_output.get_version(), run_output.get_start_time(),
os.path.basename(run_output.get_profile_name()),
run_output.get_agent(), run_output.get_node_name())
if index not in self._run_outputs:
self._run_outputs[index] = 1
else:
self._run_outputs[index] += 1
def _get_base_index(self, run_output):
"""Constructs the actual index tuple to be used to construct a
uniquely-identifying MultiIndex for this data.
Takes a run_output as input, and returns the unique tuple to
identify this run_output in the DataFrame. Note that this
method appends the current experiment iteration to the end of
the returned tuple. E.g.:
>>> self._index_tracker.get_base_index(rr)
('0.1.1+dev365gfcda929', 'geopm_test_integration', 170,
'static_policy', 'power_balancing', 'mr-fusion2', 1)
Args:
run_output: The Trace object to produce an index tuple for.
Returns:
Tuple: This will contain all of the index fields needed to uniquely identify this data (including the
count of how many times this experiment has been seen).
"""
key = (run_output.get_version(), run_output.get_start_time(),
os.path.basename(run_output.get_profile_name()),
run_output.get_agent(), run_output.get_node_name())
return key + (self._run_outputs[key], )
def get_multiindex(self, run_output):
"""Returns a MultiIndex from this run_output. Used in DataFrame construction.
This will add the current run_output to the list of tracked
data, and return a unique multiindex tuple to identify this
data in a DataFrame.
For Trace objects, the integer index of the DataFrame is
appended to the tuple.
Args:
run_output: The Trace object to produce an index
tuple for.
Returns:
pandas.MultiIndex: The unique index to identify this data object.
"""
self._check_increment(run_output)
itl = []
index_names = ['version', 'start_time', 'name', 'agent', 'node_name', 'iteration']
# Trace file index
index_names.append('index')
for ii in range(len(run_output.get_df())): # Append the integer index to the DataFrame index
itl.append(self._get_base_index(run_output) + (ii, ))
mi = pandas.MultiIndex.from_tuples(itl, names=index_names)
return mi
def reset(self):
"""Clears the internal tracking dictionary.
Since only one type of data (reports OR traces) can be tracked
at once, this is necessary to reset the object's state so a
new type of data can be tracked.
"""
self._run_outputs = {}
class Trace(object):
"""Creates a pandas DataFrame comprised of the trace file data.
This object will parse both the header and the CSV data in a trace
file. The header identifies the uniquely-identifying configuration
for this file which is used for later indexing purposes.
Even though __getattr__() and __getitem__() allow this object to
effectively be treated like a DataFrame, you must use get_df() if
you're building a list of DataFrames to pass to pandas.concat().
Using the raw object in a list and calling concat will cause an
error.
Attributes:
trace_path: The path to the trace file to parse.
"""
def __init__(self, trace_path, use_agent=True):
self._path = trace_path
old_headers = {'time': 'TIME',
'epoch_count': 'EPOCH_COUNT',
'region_hash': 'REGION_HASH',
'region_hint': 'REGION_HINT',
'region_progress': 'REGION_PROGRESS',
'region_count': 'REGION_COUNT',
'region_runtime': 'REGION_RUNTIME',
'energy_package': 'ENERGY_PACKAGE',
'energy_dram': 'ENERGY_DRAM',
'power_package': 'POWER_PACKAGE',
'power_dram': 'POWER_DRAM',
'frequency': 'FREQUENCY',
'cycles_thread': 'CYCLES_THREAD',
'cycles_reference': 'CYCLES_REFERENCE',
'temperature_core': 'TEMPERATURE_CORE'}
old_balancer_headers = {'policy_power_cap': 'POLICY_POWER_CAP',
'policy_step_count': 'POLICY_STEP_COUNT',
'policy_max_epoch_runtime': 'POLICY_MAX_EPOCH_RUNTIME',
'policy_power_slack': 'POLICY_POWER_SLACK',
'epoch_runtime': 'EPOCH_RUNTIME',
'power_limit': 'POWER_LIMIT',
'enforced_power_limit': 'ENFORCED_POWER_LIMIT'}
old_headers.update(old_balancer_headers)
old_governor_headers = {'power_budget': 'POWER_BUDGET'}
old_headers.update(old_governor_headers)
# Need to determine how many lines are in the header
# explicitly. We cannot use '#' as a comment character since
# it occurs in raw MSR signal names.
skiprows = 0
with open(trace_path) as fid:
for ll in fid:
if ll.startswith('#'):
skiprows += 1
else:
break
column_headers = pandas.read_csv(trace_path, sep='|', skiprows=skiprows, nrows=0, encoding='utf-8').columns.tolist()
original_headers = copy.deepcopy(column_headers)
column_headers = [old_headers.get(ii, ii) for ii in column_headers]
if column_headers != original_headers:
sys.stderr.write('Warning: <geopm> geopmpy.io: Old trace file format detected. Old column headers will be forced ' \
'to UPPERCASE.\n')
# region_hash and region_hint must be a string for pretty printing pandas DataFrames
# You can force them to int64 by setting up a converter function then passing the hex string through it
# with the read_csv call, but the number will be displayed as an integer from then on. You'd have to convert
# it back to a hex string to compare it with the data in the reports.
self._df = pandas.read_csv(trace_path, sep='|', skiprows=skiprows, header=0, names=column_headers, encoding='utf-8',
dtype={'REGION_HASH': 'unicode', 'REGION_HINT': 'unicode'})
self._df.columns = list(map(str.strip, self._df[:0])) # Strip whitespace from column names
self._df['REGION_HASH'] = self._df['REGION_HASH'].astype('unicode').map(str.strip) # Strip whitespace from region hashes
self._df['REGION_HINT'] = self._df['REGION_HINT'].astype('unicode').map(str.strip) # Strip whitespace from region hints
self._version = None
self._start_time = None
self._profile_name = None
self._agent = None
self._node_name = None
self._use_agent = use_agent
self._parse_header(trace_path)
def __repr__(self):
return self._df.__repr__()
def __str__(self):
return self.__repr__()
def __getattr__(self, attr):
"""Pass through attribute requests to the underlying DataFrame.
This allows for Trace objects to be treated like DataFrames
for analysis. You can do things like:
>>> tt = geopmpy.io.Trace('170-4-balanced-minife-trace-mr-fusion5')
>>> tt.keys()
Index([u'region_hash', u'region_hint', u'seconds', u'pkg_energy-0', u'dram_energy-0',...
"""
return getattr(self._df, attr)
def __getitem__(self, key):
"""Pass through item requests to the underlying DataFrame.
This allows standard DataFrame slicing operations to take place.
@todo, update
>>> tt[['region_hash', 'region_hint', 'time', 'energy_package', 'energy_dram']][:5]
region_hash region_hint time energy_package-0 energy_dram-0
0 2305843009213693952 0.662906 106012.363770 25631.015519
1 2305843009213693952 0.667854 106012.873718 25631.045777
2 2305843009213693952 0.672882 106013.411621 25631.075807
3 2305843009213693952 0.677869 106013.998108 25631.105882
4 2305843009213693952 0.682849 106014.621704 25631.136186
"""
return self._df.__getitem__(key)
def _parse_header(self, trace_path):
"""Parses the configuration header out of the top of the trace file.
Args:
trace_path: The path to the trace file to parse.
"""
done = False
out = []
with open(trace_path) as fid:
while not done:
ll = fid.readline()
if ll.startswith('#'):
out.append(ll[1:])
else:
done = True
try:
yaml_fd = io.StringIO(u''.join(out))
dd = yaml.load(yaml_fd, Loader=yaml.SafeLoader)
except yaml.parser.ParserError:
out.insert(0, '{')
out.append('}')
json_str = ''.join(out)
dd = json.loads(json_str)
try:
self._version = dd['geopm_version']
self._start_time = dd['start_time']
self._profile_name = dd['profile_name']
if self._use_agent:
self._agent = dd['agent']
self._node_name = dd['node_name']
except KeyError:
raise SyntaxError('<geopm> geopmpy.io: Trace file header could not be parsed!')
def get_df(self):
return self._df
def get_version(self):
return self._version
def get_start_time(self):
return self._start_time
def get_profile_name(self):
return self._profile_name
def get_agent(self):
return self._agent
def get_node_name(self):
return self._node_name
@staticmethod
def diff_df(trace_df, column_regex, epoch=True):
"""Diff the DataFrame.
Since the counters in the trace files are monotonically
increasing, a diff must be performed to extract the useful
data.
Args:
trace_df: The MultiIndexed DataFrame created by the
AppOutput class.
column_regex: A string representing the regex search
pattern for the column names to diff.
epoch: A flag to set whether or not to focus solely on
epoch regions.
Returns:
pandas.DataFrame: With the diffed columns specified by
'column_regex', and an 'elapsed_time'
column.
Todo:
* Should I drop everything before the first epoch if
'epoch' is false?
"""
# drop_duplicates() is a workaround for #662. Duplicate data
# rows are showing up in the trace for unmarked.
tmp_df = trace_df.drop_duplicates()
filtered_df = tmp_df.filter(regex=column_regex).copy()
filtered_df['elapsed_time'] = tmp_df['time']
if epoch:
filtered_df['epoch_count'] = tmp_df['epoch_count']
filtered_df = filtered_df.diff()
# The following drops all 0's and the negative sample when traversing between 2 trace files.
# If the epoch_count column is included, this will also drop rows occurring mid-epoch.
filtered_df = filtered_df.loc[(filtered_df > 0).all(axis=1)]
# Reset 'index' to be 0 to the length of the unique trace files
traces_list = []
for (version, start_time, name, agent, node_name, iteration), df in \
filtered_df.groupby(level=['version', 'start_time', 'name', 'agent', 'node_name', 'iteration']):
df = df.reset_index(level='index')
df['index'] = pandas.Series(numpy.arange(len(df)), index=df.index)
df = df.set_index('index', append=True)
traces_list.append(df)
return pandas.concat(traces_list)
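# Illustrative use of diff_df (the column regex and the app_output object are
# assumptions): turn monotonically increasing energy counters into per-sample
# deltas for every matching column.
#
#     deltas = Trace.diff_df(app_output.get_trace_df(), 'ENERGY_PACKAGE')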
@staticmethod
def get_median_df(trace_df, column_regex, config):
"""Extract the median experiment iteration.
This logic calculates the sum of elapsed times for all of the
experiment iterations for all nodes in that iteration. It
then extracts the DataFrame for the iteration that is closest
to the median. For input DataFrames with a single iteration,
the single iteration is returned.
Args:
trace_df: The MultiIndexed DataFrame created by the
AppOutput class.
column_regex: A string representing the regex search
pattern for the column names to diff.
config: The TraceConfig object being used presently.
Returns:
pandas.DataFrame: Containing a single experiment iteration.
"""
diffed_trace_df = Trace.diff_df(trace_df, column_regex, config.epoch_only)
idx = pandas.IndexSlice
et_sums = diffed_trace_df.groupby(level=['iteration'])['elapsed_time'].sum()
median_index = (et_sums - et_sums.median()).abs().sort_values().index[0]
median_df = diffed_trace_df.loc[idx[:, :, :, :, :, median_index], ]
if config.verbose:
median_df_index = []
median_df_index.append(median_df.index.get_level_values('version').unique()[0])
median_df_index.append(median_df.index.get_level_values('start_time').unique()[0])
median_df_index.append(median_df.index.get_level_values('name').unique()[0])
median_df_index.append(median_df.index.get_level_values('agent').unique()[0])
median_df_index.append(median_df.index.get_level_values('iteration').unique()[0])
sys.stdout.write('Median DF index = ({})...\n'.format(' '.join(str(s) for s in median_df_index)))
sys.stdout.flush()
return median_df
class BenchConf(object):
"""The application configuration parameters.
Used to hold the config data for the integration test application.
This application allows for varying combinations of regions
(compute, IO, or network bound), complexity, desired execution
count, and amount of imbalance between nodes during execution.
Attributes:
path: The output path for this configuration file.
"""
def __init__(self, path):
self._path = path
self._loop_count = 1
self._region = []
self._big_o = []
self._hostname = []
self._imbalance = []
def __repr__(self):
template = """\
path : {path}
regions : {regions}
big-o : {big_o}
loop count: {loops}
hostnames : {hosts}
imbalance : {imbalance}
"""
return template.format(path=self._path,
regions=self._region,
big_o=self._big_o,
loops=self._loop_count,
hosts=self._hostname,
imbalance=self._imbalance)
def __str__(self):
return self.__repr__()
def set_loop_count(self, loop_count):
self._loop_count = loop_count
def append_region(self, name, big_o):
"""Appends a region to the internal list.
Args:
name: The string representation of the region.
big_o: The desired complexity of the region. This
affects compute, IO, or network complexity
depending on the type of region requested.
"""
self._region.append(name)
self._big_o.append(big_o)
def append_imbalance(self, hostname, imbalance):
"""Appends imbalance to the config for a particular node.
Args:
hostname: The name of the node.
imbalance: The amount of imbalance to apply to the node.
This is specified by a float in the range
[0,1]. For example, specifying a value of 0.25
means that this node will spend 25% more time
executing the work than a node would by
default. Nodes not specified with imbalance
configurations will perform normally.
"""
self._hostname.append(hostname)
self._imbalance.append(imbalance)
def get_path(self):
return self._path
def write(self):
"""Write the current config to a file."""
obj = {'loop-count': self._loop_count,
'region': self._region,
'big-o': self._big_o}
if (self._imbalance and self._hostname):
obj['imbalance'] = self._imbalance
obj['hostname'] = self._hostname
with open(self._path, 'w') as fid:
json.dump(obj, fid)
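# A config written by the method above looks roughly like the following JSON
# (all values are illustrative):
#
#     {"loop-count": 100, "region": ["dgemm", "stream"], "big-o": [8.0, 2.0],
#      "imbalance": [0.25], "hostname": ["compute-node-1"]}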
def get_exec_path(self):
# Using libtool causes sporadic issues with the Intel
# toolchain.
result = 'geopmbench'
path = find_executable(result)
source_dir = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))))
source_bin = os.path.join(source_dir, '.libs', 'geopmbench')
if not path:
result = source_bin
else:
with open(path, 'rb') as fid:
buffer = fid.read(4096)
if b'Generated by libtool' in buffer:
result = source_bin
return result
def get_exec_args(self):
return [self._path]
class RawReport(object):
def __init__(self, path):
update_report.update_report(path)
# Fix issue with python yaml module where it is confused
# about floating point numbers of the form "1e+10" where
# the decimal point is missing.
# See PR: https://github.com/yaml/pyyaml/pull/174
# for upstream fix to pyyaml
loader = yaml.SafeLoader
loader.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\.[0-9_]+(?:[eE][-+]?[0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
with open(path) as fid:
self._raw_dict = yaml.load(fid, Loader=loader)
def raw_report(self):
return copy.deepcopy(self._raw_dict)
def dump_json(self, path):
jdata = json.dumps(self._raw_dict)
with open(path, 'w') as fid:
fid.write(jdata)
def meta_data(self):
result = dict()
all_keys = ['GEOPM Version',
'Start Time',
'Profile',
'Agent',
'Policy']
for kk in all_keys:
result[kk] = self._raw_dict[kk]
return result
def figure_of_merit(self):
result = None
try:
result = copy.deepcopy(self._raw_dict['Figure of Merit'])
except:
pass
return result
def total_runtime(self):
result = None
try:
result = copy.deepcopy(self._raw_dict['Total Runtime'])
except:
pass
return result
def host_names(self):
return list(self._raw_dict['Hosts'].keys())
def region_names(self, host_name):
return [rr['region'] for rr in self._raw_dict['Hosts'][host_name]['Regions']]
def raw_region(self, host_name, region_name):
result = None
for rr in self._raw_dict['Hosts'][host_name]['Regions']:
if rr['region'] == region_name:
result = copy.deepcopy(rr)
if not result:
raise RuntimeError('region name: {} not found'.format(region_name))
return result
def raw_unmarked(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Unmarked Totals'
return copy.deepcopy(host_data[key])
def raw_epoch(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Epoch Totals'
return copy.deepcopy(host_data[key])
def raw_totals(self, host_name):
host_data = self._raw_dict["Hosts"][host_name]
key = 'Application Totals'
return copy.deepcopy(host_data[key])
def agent_host_additions(self, host_name):
# other keys that are not region, epoch, or app
# total i.e. from Agent::report_host()
host_data = self._raw_dict[host_name]
result = {}
for key, val in host_data.items():
if key not in ['Epoch Totals', 'Application Totals'] and not key.startswith('Region '):
result[key] = copy.deepcopy(val)
return result
def get_field(self, raw_data, key, units=''):
matches = [(len(kk), kk) for kk in raw_data if key in kk and units in kk]
if len(matches) == 0:
raise KeyError('<geopm> geopmpy.io: Field not found: {}'.format(key))
match = sorted(matches)[0][1]
return copy.deepcopy(raw_data[match])
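# get_field() picks the shortest key that contains both the requested name and
# units, e.g. get_field(totals, 'runtime', 'sec') prefers a key 'runtime (sec)'
# over a longer match such as 'sync-runtime (sec)' (key names are illustrative).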
class RawReportCollection(object):
'''
Used to group together a collection of related RawReports.
'''
def __init__(self, report_paths, dir_name='.', dir_cache=None, verbose=True, do_cache=True):
self._reports_df = pandas.DataFrame()
self._app_reports_df = pandas.DataFrame()
self._epoch_reports_df = pandas.DataFrame()
self._meta_data = None
self.load_reports(report_paths, dir_name, dir_cache, verbose, do_cache)
@staticmethod
def make_h5_name(paths, outdir):
paths_str = str([os.path.realpath(rr) for rr in paths])
h5_id = hashlib.sha256(paths_str.encode()).hexdigest()[:14]
report_h5_name = os.path.join(outdir, 'cache_{}.h5'.format(h5_id))
return report_h5_name
@staticmethod
def fixup_metadata(metadata, df):
# This block works around having mixed datatypes in the Profile column by
# forcing the column into the NumPy S type.
for key, val in metadata.items():
if type(val) is not dict: # Policy is a dict and should be excluded
df[key] = df[key].astype('S')
return df
def load_reports(self, reports, dir_name, dir_cache, verbose, do_cache):
'''
TODO: copied from AppOutput. refactor to shared function.
- removed concept of tracked files to be deleted
- added separate epoch dataframe
'''
if type(reports) is list:
report_paths = [os.path.join(dir_name, path) for path in reports]
else:
report_glob = os.path.join(dir_name, reports)
try:
report_paths = glob.glob(report_glob)
except TypeError:
raise TypeError('<geopm> geopmpy.io: AppOutput: reports must be a list of paths or a glob pattern')
report_paths = natsorted(report_paths)
if len(report_paths) == 0:
raise RuntimeError('<geopm> geopmpy.io: No report files found with pattern {}.'.format(report_glob))
if do_cache:
if dir_cache is None:
dir_cache = dir_name
self._report_h5_name = RawReportCollection.make_h5_name(report_paths, dir_cache)
# check if cache is older than reports
if os.path.exists(self._report_h5_name):
cache_mod_time = os.path.getmtime(self._report_h5_name)
regen_cache = False
for report_file in report_paths:
mod_time = os.path.getmtime(report_file)
if mod_time > cache_mod_time:
regen_cache = True
if regen_cache:
os.remove(self._report_h5_name)
try:
if verbose:
sys.stdout.write('Attempting to read {}...\n'.format(self._report_h5_name))
# load dataframes from cache
self._reports_df = pandas.read_hdf(self._report_h5_name, 'report')
self._app_reports_df = pandas.read_hdf(self._report_h5_name, 'app_report')
# temporary workaround since old format cache is missing unmarked_data
try:
self._unmarked_reports_df = pandas.read_hdf(self._report_h5_name, 'unmarked_report')
except:
self._unmarked_reports_df = self._reports_df.loc[self._reports_df['region'] == 'unmarked-region']
self._epoch_reports_df = pandas.read_hdf(self._report_h5_name, 'epoch_report')
if verbose:
sys.stdout.write('Loaded report data from {}.\n'.format(self._report_h5_name))
except IOError:
sys.stderr.write('Warning: <geopm> geopmpy.io: Report HDF5 file not detected or older than reports. Data will be saved to {}.\n'
.format(self._report_h5_name))
self.parse_reports(report_paths, verbose)
# Cache report dataframe
cache_created = False
while not cache_created:
try:
if verbose:
sys.stdout.write('Generating HDF5 files... ')
self._reports_df.to_hdf(self._report_h5_name, 'report', format='table')
self._app_reports_df.to_hdf(self._report_h5_name, 'app_report', format='table', append=True)
self._unmarked_reports_df.to_hdf(self._report_h5_name, 'unmarked_report', format='table', append=True)
self._epoch_reports_df.to_hdf(self._report_h5_name, 'epoch_report', format='table', append=True)
cache_created = True
except TypeError as error:
fm = RawReportCollection.fixup_metadata
if verbose:
sys.stdout.write('Applying workaround for strings in HDF5 files... ')
self._reports_df = fm(self._meta_data, self._reports_df)
self._app_reports_df = fm(self._meta_data, self._app_reports_df)
self._unmarked_reports_df = fm(self._meta_data, self._unmarked_reports_df)
self._epoch_reports_df = fm(self._meta_data, self._epoch_reports_df)
except ImportError as error:
sys.stderr.write('Warning: <geopm> geopmpy.io: Unable to write HDF5 file: {}\n'.format(str(error)))
break
if verbose:
sys.stdout.write('Done.\n')
sys.stdout.flush()
except:
raise RuntimeError('<geopm> geopmpy.io: {} could not be read. Try removing and regenerating the cache file.'.format(self._report_h5_name))
else:
self.parse_reports(report_paths, verbose)
def parse_reports(self, report_paths, verbose):
# Note: overlapping key names can break this
# Insert repeated data for non-leaf levels
# TODO: iteration - for now, distinguishable from start time
def _init_tables():
self._columns_order = {}
self._columns_set = {}
for name in ['region', 'unmarked', 'epoch', 'app']:
self._columns_order[name] = []
self._columns_set[name] = set()
def _add_column(table_name, col_name):
'''
Used to add columns to the data frame in the order they appear in the report.
'''
if table_name not in ['region', 'unmarked', 'epoch', 'app', 'all']:
raise RuntimeError('Invalid table name')
if table_name == 'all':
_add_column('region', col_name)
_add_column('unmarked', col_name)
_add_column('epoch', col_name)
_add_column('app', col_name)
elif col_name not in self._columns_set[table_name]:
self._columns_set[table_name].add(col_name)
self._columns_order[table_name].append(col_name)
def _try_float(val):
'''
Attempt to convert values we assume are floats
'''
rv = val
try:
rv = float(val)
except:
pass
return rv
_init_tables()
region_df_list = []
unmarked_df_list = []
epoch_df_list = []
app_df_list = []
for report in report_paths:
if verbose:
sys.stdout.write("Loading data from {}.\n".format(report))
rr = RawReport(report)
self._meta_data = rr.meta_data()
header = {}
# report header
for top_key, top_val in self._meta_data.items():
# allow one level of dict nesting in header for policy
if type(top_val) is dict:
for in_key, in_val in top_val.items():
_add_column('all', in_key)
header[in_key] = _try_float(in_val)
else:
_add_column('all', top_key)
header[top_key] = str(top_val)
_add_column('all', 'host')
figure_of_merit = rr.figure_of_merit()
if figure_of_merit is not None:
_add_column('app', 'FOM')
total_runtime = rr.total_runtime()
if total_runtime is not None:
_add_column('app', 'total_runtime')
host_names = rr.host_names()
for host in host_names:
# data about host to be repeated over all rows
per_host_data = {'host': host}
# TODO: other host data may also contain dict
# TODO: leave out for now
#other_host_data = rr.agent_host_additions(host)
_add_column('region', 'region')
region_names = rr.region_names(host)
for region in region_names:
row = copy.deepcopy(header)
row.update(per_host_data)
#row['region'] = row.pop('name')
# TODO: region hash
region_data = rr.raw_region(host, region)
for key, val in region_data.items():
region_data[key] = _try_float(val)
row.update(region_data)
for cc in rr.raw_region(host, region).keys():
_add_column('region', cc)
region_df_list.append(pandas.DataFrame(row, index=[0]))
unmarked_row = copy.deepcopy(header)
unmarked_row.update(per_host_data)
unmarked_data = rr.raw_unmarked(host)
for key, val in unmarked_data.items():
unmarked_data[key] = _try_float(val)
for cc in unmarked_data.keys():
_add_column('unmarked', cc)
unmarked_row.update(unmarked_data)
unmarked_df_list.append(pandas.DataFrame(unmarked_row, index=[0]))
#!/usr/bin/env python
"""Script for generating figures of catalog statistics. Run `QCreport.py -h`
for command line usage.
"""
import os
import sys
import errno
import argparse
from datetime import date, datetime
from math import sqrt, radians, cos
import markdown
import numpy as np
import pandas as pd
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Polygon
from obspy.geodetics.base import gps2dist_azimuth
# Python 2
try:
from urllib2 import urlopen, HTTPError
# Python 3
except ImportError:
from urllib.request import urlopen, HTTPError
import QCutils as qcu
from decorators import retry, printstatus
###############################################################################
###############################################################################
###############################################################################
@printstatus('Creating basic catalog summary')
def basic_cat_sum(catalog, dirname, dup1, dup2, timewindow, distwindow):
"""Gather basic catalog summary statistics."""
lines = []
lines.append('Catalog name: %s\n\n' % dirname[:-9].upper())
lines.append('First date in catalog: %s\n' % catalog['time'].min())
lines.append('Last date in catalog: %s\n\n' % catalog['time'].max())
lines.append('Total number of events: %s\n\n' % len(catalog))
lines.append('Minimum latitude: %s\n' % catalog['latitude'].min())
lines.append('Maximum latitude: %s\n' % catalog['latitude'].max())
lines.append('Minimum longitude: %s\n' % catalog['longitude'].min())
lines.append('Maximum longitude: %s\n\n' % catalog['longitude'].max())
lines.append('Minimum depth: %s\n' % catalog['depth'].min())
lines.append('Maximum depth: %s\n' % catalog['depth'].max())
lines.append('Number of 0 km depth events: %s\n'
% len(catalog[catalog['depth'] == 0]))
lines.append('Number of NaN depth events: %s\n\n'
% len(catalog[pd.isnull(catalog['depth'])]))
lines.append('Minimum magnitude: %s\n' % catalog['mag'].min())
lines.append('Maximum magnitude: %s\n' % catalog['mag'].max())
lines.append('Number of 0 magnitude events: %s\n'
% len(catalog[catalog['mag'] == 0]))
lines.append('Number of NaN magnitude events: %s\n\n'
% len(catalog[pd.isnull(catalog['mag'])]))
lines.append('Number of possible duplicates (%ss and %skm threshold): %d\n'
% (timewindow, distwindow, dup1))
lines.append('Number of possible duplicates (16s and 100km threshold): %d'
% dup2)
with open('%s_catalogsummary.txt' % dirname, 'w') as sumfile:
for line in lines:
sumfile.write(line)
def largest_ten(catalog, dirname):
"""Make a list of the 10 events with largest magnitude."""
catalog = catalog.sort_values(by='mag', ascending=False)
topten = catalog.head(n=10)
topten = topten[['time', 'id', 'latitude', 'longitude', 'depth', 'mag']]
with open('%s_largestten.txt' % dirname, 'w') as magfile:
for event in topten.itertuples():
line = ' '.join([str(x) for x in event[1:]]) + '\n'
magfile.write(line)
@printstatus('Finding possible duplicates')
def list_duplicates(catalog, dirname, timewindow=2, distwindow=15,
magwindow=None, minmag=-5, locfilter=None):
"""Make a list of possible duplicate events."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T'))
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
catalog = catalog[catalog['mag'] >= minmag]
if locfilter:
catalog = catalog[catalog['place'].str.contains(locfilter, na=False)]
cat = catalog[['time', 'convtime', 'id', 'latitude', 'longitude', 'depth',
'mag']].copy()
cat.loc[:, 'time'] = [qcu.to_epoch(x) for x in cat['time']]
duplines1 = [('Possible duplicates using %ss time threshold and %skm '
'distance threshold\n') % (timewindow, distwindow),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
duplines2 = [('\n\nPossible duplicates using 16s time threshold and 100km '
'distance threshold\n'),
'***********************\n'
'date time id latitude longitude depth magnitude '
'(distance) (Δ time) (Δ magnitude)\n']
sep = '-----------------------\n'
thresh1dupes, thresh2dupes = 0, 0
for event in cat.itertuples():
trimdf = cat[cat['convtime'].between(event.convtime, event.convtime
+ pd.Timedelta(seconds=16), inclusive=False)]
if len(trimdf) != 0:
for tevent in trimdf.itertuples():
dist = gps2dist_azimuth(event.latitude, event.longitude,
tevent.latitude, tevent.longitude)[0] / 1000.
if dist < 100:
dtime = (event.convtime - tevent.convtime).total_seconds()
dmag = event.mag - tevent.mag
diffs = map('{:.2f}'.format, [dist, dtime, dmag])
dupline1 = ' '.join([str(x) for x in event[1:]]) + ' ' +\
' '.join(diffs) + '\n'
dupline2 = ' '.join([str(x) for x in tevent[1:]]) + '\n'
duplines2.extend((sep, dupline1, dupline2))
thresh2dupes += 1
if (dist < distwindow) and (abs(dtime) < timewindow):
duplines1.extend((sep, dupline1, dupline2))
thresh1dupes += 1
continue
with open('%s_duplicates.txt' % dirname, 'w') as dupfile:
for dupline in duplines1:
dupfile.write(dupline)
for dupline in duplines2:
dupfile.write(dupline)
return thresh1dupes, thresh2dupes
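# Note: gps2dist_azimuth() returns (distance in metres, azimuth, back azimuth),
# so the division by 1000 above converts the epicentral separation to km.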
@printstatus('Mapping earthquake locations')
def map_detecs(catalog, dirname, minmag=-5, mindep=-50, title=''):
"""Make scatter plot of detections with magnitudes (if applicable)."""
catalog = catalog[(catalog['mag'] >= minmag)
& (catalog['depth'] >= mindep)].copy()
if len(catalog) == 0:
print('\nCatalog contains no events deeper than %s.' % mindep)
return
# define map bounds
lllat, lllon, urlat, urlon, _, _, _, clon = qcu.get_map_bounds(catalog)
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.set_extent([lllon, urlon, lllat, urlat], ccrs.PlateCarree())
mplmap.coastlines('50m', facecolor='none')
# if catalog has magnitude data
if not catalog['mag'].isnull().all():
bins = [0, 5, 6, 7, 8, 15]
binnames = ['< 5', '5-6', '6-7', '7-8', r'$\geq$8']
binsizes = [10, 25, 50, 100, 400]
bincolors = ['g', 'b', 'y', 'r', 'r']
binmarks = ['o', 'o', 'o', 'o', '*']
catalog.loc[:, 'maggroup'] = pd.cut(catalog['mag'], bins,
labels=binnames)
for i, label in enumerate(binnames):
mgmask = catalog['maggroup'] == label
rcat = catalog[mgmask]
lons, lats = list(rcat['longitude']), list(rcat['latitude'])
if len(lons) > 0:
mplmap.scatter(lons, lats, s=binsizes[i], marker=binmarks[i],
c=bincolors[i], label=binnames[i], alpha=0.8,
zorder=10, transform=ccrs.PlateCarree())
plt.legend(loc='lower left', title='Magnitude')
# if catalog does not have magnitude data
else:
lons, lats = list(catalog['longitude']), list(catalog['latitude'])
mplmap.scatter(lons, lats, s=15, marker='x', c='r', zorder=10)
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
mplmap.add_feature(cfeature.BORDERS)
plt.title(title, fontsize=20)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
if mindep != -50:
plt.savefig('%s_morethan%sdetecs.png' % (dirname, mindep), dpi=300)
else:
plt.savefig('%s_mapdetecs.png' % dirname, dpi=300)
plt.close()
@printstatus('Mapping earthquake density')
def map_detec_nums(catalog, dirname, title='', numcolors=16, rmin=77, rmax=490,
minmag=-5, pltevents=True):
"""Map detections and a grid of detection density. rmax=510 is white,
rmin=0 is black.
"""
# generate bounds for map
mask = catalog['mag'] >= minmag
lllat, lllon, urlat, urlon, gridsize, hgridsize, _, clon = \
qcu.get_map_bounds(catalog[mask])
catalog = qcu.add_centers(catalog, gridsize)
groupedlatlons, _, cmax = qcu.group_lat_lons(catalog, minmag=minmag)
# print message if there are no detections with magnitudes above minmag
if cmax == 0:
print("No detections over magnitude %s" % minmag)
# create color gradient from light red to dark red
colors = qcu.range2rgb(rmin, rmax, numcolors)
# put each center into its corresponding color group
colorgroups = list(np.linspace(0, cmax, numcolors))
groupedlatlons.loc[:, 'group'] = np.digitize(groupedlatlons['count'],
colorgroups)
# create map
plt.figure(figsize=(12, 7))
mplmap = plt.axes(projection=ccrs.PlateCarree(central_longitude=clon))
mplmap.set_extent([lllon, urlon, lllat, urlat], ccrs.PlateCarree())
mplmap.coastlines('50m')
mplmap.add_feature(cfeature.BORDERS)
mplmap.add_feature(cfeature.NaturalEarthFeature('cultural',
'admin_1_states_provinces_lines', '50m', facecolor='none',
edgecolor='k', zorder=9))
plt.title(title, fontsize=20)
plt.subplots_adjust(left=0.01, right=0.9, top=0.95, bottom=0.05)
# create color map based on rmin and rmax
cmap = LinearSegmentedColormap.from_list('CM', colors)._resample(numcolors)
# make dummy plot for setting color bar
colormesh = mplmap.pcolormesh(colors, colors, colors, cmap=cmap, alpha=1,
vmin=0, vmax=cmax)
# format color bar
cbticks = [x for x in np.linspace(0, cmax, numcolors+1)]
cbar = plt.colorbar(colormesh, ticks=cbticks)
cbar.ax.set_yticklabels([('%.0f' % x) for x in cbticks])
cbar.set_label('# of detections', rotation=270, labelpad=15)
# plot rectangles with color corresponding to number of detections
for center, _, cgroup in groupedlatlons.itertuples():
minlat, maxlat = center[0]-hgridsize, center[0]+hgridsize
minlon, maxlon = center[1]-hgridsize, center[1]+hgridsize
glats = [minlat, maxlat, maxlat, minlat]
glons = [minlon, minlon, maxlon, maxlon]
color = colors[cgroup-1]
qcu.draw_grid(glats, glons, color, alpha=0.8)
# if provided, plot detection epicenters
if pltevents and not catalog['mag'].isnull().all():
magmask = catalog['mag'] >= minmag
lons = list(catalog['longitude'][magmask])
lats = list(catalog['latitude'][magmask])
mplmap.scatter(lons, lats, c='k', s=7, marker='x', zorder=5)
elif catalog['mag'].isnull().all():
lons = list(catalog['longitude'])
lats = list(catalog['latitude'])
mplmap.scatter(lons, lats, c='k', s=7, marker='x', zorder=5)
plt.savefig('%s_eqdensity.png' % dirname, dpi=300)
plt.close()
@printstatus('Making histogram of given parameter')
def make_hist(catalog, param, binsize, dirname, title='', xlabel='',
countlabel=False, maxval=None):
"""Plot histogram grouped by some parameter."""
paramlist = catalog[pd.notnull(catalog[param])][param].tolist()
minparam, maxparam = min(paramlist), max(paramlist)
paramdown = qcu.round2bin(minparam, binsize, 'down')
paramup = qcu.round2bin(maxparam, binsize, 'up')
numbins = int((paramup-paramdown) / binsize)
labelbuff = float(paramup-paramdown) / numbins * 0.5
diffs = [abs(paramlist[i+1]-paramlist[i]) for i in range(len(paramlist))
if i+1 < len(paramlist)]
diffs = [round(x, 1) for x in diffs if x > 0]
plt.figure(figsize=(10, 6))
plt.title(title, fontsize=20)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel('Count', fontsize=14)
if param == 'ms':
parambins = np.linspace(paramdown, paramup, numbins+1)
plt.xlim(paramdown, paramup)
else:
parambins = np.linspace(paramdown, paramup+binsize,
numbins+2) - binsize/2.
plt.xlim(paramdown-binsize/2., paramup+binsize/2.)
phist = plt.hist(paramlist, parambins, alpha=0.7, color='b', edgecolor='k')
maxbarheight = max([phist[0][x] for x in range(numbins)] or [0])
labely = maxbarheight / 50.
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.11)
if maxval:
plt.xlim(xmax=maxval)
plt.ylim(0, maxbarheight*1.1+0.1)
# put count numbers above the bars if countlabel=True
if countlabel:
for i in range(numbins):
plt.text(phist[1][i]+labelbuff, phist[0][i]+labely,
'%0.f' % phist[0][i], size=12, ha='center')
if maxval:
plt.savefig('%s_zoom%shistogram.png' % (dirname, param), dpi=300)
else:
plt.savefig('%s_%shistogram.png' % (dirname, param), dpi=300)
plt.close()
@printstatus('Making histogram of given time duration')
def make_time_hist(catalog, timelength, dirname, title=''):
"""Make histogram either by hour of the day or by date."""
timelist = catalog['time']
plt.figure(figsize=(10, 6))
plt.title(title, fontsize=20)
plt.ylabel('Count', fontsize=14)
if timelength == 'hour':
lons = np.linspace(-180, 180, 25).tolist()
hours = np.linspace(-12, 12, 25).tolist()
tlonlist = catalog.loc[:, ['longitude', 'time']]
tlonlist.loc[:, 'rLon'] = qcu.round2lon(tlonlist['longitude'])
tlonlist.loc[:, 'hour'] = [int(x.split('T')[1].split(':')[0])
for x in tlonlist['time']]
tlonlist.loc[:, 'rhour'] = [x.hour + hours[lons.index(x.rLon)]
for x in tlonlist.itertuples()]
tlonlist.loc[:, 'rhour'] = [x+24 if x < 0 else x-24 if x > 23 else x
for x in tlonlist['rhour']]
hourlist = tlonlist.rhour.tolist()
hourbins = np.linspace(-0.5, 23.5, 25)
plt.hist(hourlist, hourbins, alpha=1, color='b', edgecolor='k')
plt.xlabel('Hour of the Day', fontsize=14)
plt.xlim(-0.5, 23.5)
elif timelength == 'day':
daylist = [x.split('T')[0] for x in timelist]
daydf = pd.DataFrame({'date': daylist})
daydf['date'] = daydf['date'].astype('datetime64[ns]')
daydf = daydf.groupby([daydf['date'].dt.year,
daydf['date'].dt.month,
daydf['date'].dt.day]).count()
eqdates = daydf.index.tolist()
counts = daydf.date.tolist()
eqdates = [date(x[0], x[1], x[2]) for x in eqdates]
minday, maxday = min(eqdates), max(eqdates)
plt.bar(eqdates, counts, alpha=1, color='b', width=1)
plt.xlabel('Date', fontsize=14)
plt.xlim(minday, maxday)
plt.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.11)
plt.savefig('%s_%shistogram.png' % (dirname, timelength), dpi=300)
plt.close()
@printstatus('Graphing mean time separation')
def graph_time_sep(catalog, dirname):
"""Make bar graph of mean time separation between events by date."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
catalog.loc[:, 'dt'] = catalog.convtime.diff().astype('timedelta64[ns]')
catalog.loc[:, 'dtmin'] = catalog['dt'] / pd.Timedelta(minutes=1)
mindate = catalog['convtime'].min()
maxdate = catalog['convtime'].max()
fig = plt.figure(figsize=(10, 6))
axfull = fig.add_subplot(111)
axfull.set_ylabel('Time separation (min)', fontsize=14, labelpad=20)
axfull.spines['top'].set_color('none')
axfull.spines['bottom'].set_color('none')
axfull.spines['left'].set_color('none')
axfull.spines['right'].set_color('none')
axfull.tick_params(labelcolor='w', top='off', bottom='off',
left='off', right='off')
if maxdate - mindate < pd.Timedelta(days=1460):
# time separation between events
fig.add_subplot(311)
plt.plot(catalog['convtime'], catalog['dtmin'], alpha=1, color='b')
plt.xlabel('Date')
plt.title('Time separation between events')
plt.xlim(mindate, maxdate)
plt.ylim(0)
# maximum monthly time separation
fig.add_subplot(312)
month_max = catalog.resample('1M', on='convtime').max()['dtmin']
months = month_max.index.map(lambda x: x.strftime('%Y-%m')).tolist()
months = [date(int(x[:4]), int(x[-2:]), 1) for x in months]
plt.bar(months, month_max.tolist(), color='b', alpha=1, width=31,
edgecolor='k')
plt.xlabel('Month')
plt.title('Maximum event separation by month')
plt.xlim(mindate - pd.Timedelta(days=15),
maxdate - pd.Timedelta(days=16))
# median monthly time separation
fig.add_subplot(313)
month_med = catalog.resample('1M', on='convtime').median()['dtmin']
plt.bar(months, month_med.tolist(), color='b', alpha=1, width=31,
edgecolor='k')
plt.xlabel('Month')
plt.title('Median event separation by month')
plt.tight_layout()
plt.xlim(mindate - pd.Timedelta(days=15),
maxdate - pd.Timedelta(days=16))
else:
# time separation between events
fig.add_subplot(311)
plt.plot(catalog['convtime'], catalog['dtmin'], alpha=1, color='b')
plt.xlabel('Date')
plt.title('Time separation between events')
plt.xlim(mindate, maxdate)
plt.ylim(0)
# maximum yearly time separation
fig.add_subplot(312)
year_max = catalog.resample('1Y', on='convtime').max()['dtmin']
years = year_max.index.map(lambda x: x.strftime('%Y')).tolist()
years = [date(int(x[:4]), 1, 1) for x in years]
plt.bar(years, year_max.tolist(), color='b', alpha=1, width=365,
edgecolor='k')
plt.xlabel('Year')
plt.title('Maximum event separation by year')
plt.xlim(mindate - pd.Timedelta(days=183),
maxdate - pd.Timedelta(days=183))
# median yearly time separation
fig.add_subplot(313)
year_med = catalog.resample('1Y', on='convtime').median()['dtmin']
plt.bar(years, year_med.tolist(), color='b', alpha=1, width=365,
edgecolor='k')
plt.xlabel('Year')
plt.title('Median event separation by year')
plt.tight_layout()
plt.xlim(mindate - pd.Timedelta(days=183),
maxdate - pd.Timedelta(days=183))
plt.savefig('%s_timeseparation.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing median magnitude by time')
def med_mag(catalog, dirname):
"""Make a bar graph of median event magnitude by year."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
mindate = catalog['convtime'].min()
maxdate = catalog['convtime'].max()
if maxdate - mindate < pd.Timedelta(days=1460):
month_max = catalog.resample('1M', on='convtime').max()['mag']
months = month_max.index.map(lambda x: x.strftime('%Y-%m')).tolist()
months = [date(int(x[:4]), int(x[-2:]), 1) for x in months]
month_medmag = catalog.resample('1M', on='convtime').median()['mag']
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.tick_params(bottom='off')
plt.bar(months, month_medmag.tolist(), color='b', edgecolor='k',
alpha=1, width=31)
plt.xlabel('Month', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.title('Monthly Median Magnitude', fontsize=20)
plt.xlim(min(months) - pd.Timedelta(days=15),
max(months) + pd.Timedelta(days=15))
else:
year_max = catalog.resample('1Y', on='convtime').max()['mag']
years = year_max.index.map(lambda x: x.strftime('%Y')).tolist()
years = [date(int(x[:4]), 1, 1) for x in years]
year_medmag = catalog.resample('1Y', on='convtime').median()['mag']
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.tick_params(bottom='off')
plt.bar(years, year_medmag.tolist(), color='b', edgecolor='k', alpha=1,
width=365)
plt.xlabel('Year', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.title('Yearly Median Magnitude', fontsize=20)
plt.xlim(min(years) - pd.Timedelta(days=183),
max(years) - pd.Timedelta(days=183))
plt.savefig('%s_medianmag' % dirname, dpi=300)
plt.close()
@printstatus('Graphing magnitude completeness')
def cat_mag_comp(catalog, dirname, magbin=0.1):
"""Plot catalog magnitude completeness."""
catalog = catalog[pd.notnull(catalog['mag'])]
mags = np.array(catalog['mag'])
mags = np.around(mags, 1)
minmag, maxmag = min(min(mags), 0), max(mags)
mag_centers = np.arange(minmag, maxmag + 2*magbin, magbin)
cdf = np.zeros(len(mag_centers))
for idx in range(len(cdf)):
cdf[idx] = np.count_nonzero(
~np.isnan(mags[mags >= mag_centers[idx]-0.001]))
mag_edges = np.arange(minmag - magbin/2., maxmag+magbin, magbin)
g_r, _ = np.histogram(mags, mag_edges)
idx = list(g_r).index(max(g_r))
mc_est = mag_centers[idx]
try:
mc_est, bvalue, avalue, lval, mc_bins, std_dev = qcu.WW2000(mc_est,
mags, magbin)
except:
mc_est = mc_est + 0.3
mc_bins = np.arange(0, maxmag + magbin/2., magbin)
bvalue = np.log10(np.exp(1))/(np.average(mags[mags >= mc_est])
- (mc_est-magbin/2.))
avalue = np.log10(len(mags[mags >= mc_est])) + bvalue*mc_est
log_l = avalue-bvalue*mc_bins
lval = 10.**log_l
std_dev = bvalue/sqrt(len(mags[mags >= mc_est]))
plt.figure(figsize=(8, 6))
plt.scatter(mag_centers[:len(g_r)], g_r, edgecolor='r', marker='o',
facecolor='none', label='Incremental')
plt.scatter(mag_centers, cdf, c='k', marker='+', label='Cumulative')
plt.axvline(mc_est, c='r', linestyle='--', label='Mc = %2.1f' % mc_est)
plt.plot(mc_bins, lval, c='k', linestyle='--',
label='B = %1.3f%s%1.3f' % (bvalue, u'\u00B1', std_dev))
ax1 = plt.gca()
ax1.set_yscale('log')
max_count = np.amax(cdf) + 100000
ax1.set_xlim([minmag, maxmag])
ax1.set_ylim([1, max_count])
plt.title('Frequency-Magnitude Distribution', fontsize=18)
plt.xlabel('Magnitude', fontsize=14)
plt.ylabel('Log10 Count', fontsize=14)
plt.legend(numpoints=1)
plt.savefig('%s_catmagcomp.png' % dirname, dpi=300)
plt.close()
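# The fitted line assumes the Gutenberg-Richter relation log10(N) = a - b*M.
# When qcu.WW2000 (presumably a Wiemer & Wyss, 2000, style completeness
# estimate) fails, the except branch above falls back to the maximum-likelihood
# estimate b = log10(e) / (mean(M >= Mc) - (Mc - magbin/2)).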
@printstatus('Graphing magnitude versus time for each earthquake')
def graph_mag_time(catalog, dirname):
"""Plot magnitudes vs. origin time."""
catalog = catalog[pd.notnull(catalog['mag'])]
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
times = catalog['time'].copy()
mags = catalog['mag'].copy()
plt.figure(figsize=(10, 6))
plt.xlabel('Date', fontsize=14)
plt.ylabel('Magnitude', fontsize=14)
plt.plot_date(times, mags, alpha=0.7, markersize=2, c='b')
plt.xlim(min(times), max(times))
plt.title('Magnitude vs. Time', fontsize=20)
plt.savefig('%s_magvtime.png' % dirname, dpi=300)
plt.close()
@printstatus('Graphing event count by date and magnitude')
def graph_mag_count(catalog, dirname):
"""Graph event count grouped by magnitude and by date."""
catalog.loc[:, 'convtime'] = [' '.join(x.split('T')).split('.')[0]
for x in catalog['time'].tolist()]
catalog.loc[:, 'convtime'] = catalog['convtime'].astype('datetime64[ns]')
mindate, maxdate = catalog['convtime'].min(), catalog['convtime'].max()
bincond = maxdate - mindate < pd.Timedelta(days=1460)
barwidth = 31 if bincond else 365
timedelt = pd.Timedelta(days=barwidth/2.)
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
def get_soup(url):
headers = {'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/39.0.2171.95 Safari/537.36')}
r = requests.get(url, headers=headers)
r.encoding = 'unicode-escape'
return BeautifulSoup(r.content, 'html.parser')
def get_data_from_table(table, data_type, skip_rows):
"""Helper method to get the data from a table. """
# https://stackoverflow.com/questions/42285417/how-to-preserve-links-when-scraping-a-table-with-beautiful-soup-and-pandas
if data_type == 'title':
data = [[td.a.get('title') if td.find('a') else ''.join(td.stripped_strings) for td in row.find_all('td')]
for row in table.find_all('tr')]
elif data_type == 'link':
data = [[td.a['href'] if td.find('a') else ''.join(td.stripped_strings) for td in row.find_all('td')]
for row in table.find_all('tr')]
else:
data = [[td.a.string if td.find('a') else ''.join(td.stripped_strings) for td in row.find_all('td')]
for row in table.find_all('tr')]
data = [d for d in data if len(d)!=0][0::skip_rows]
return data
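# Example usage (hypothetical table, mirroring the calls below): return rows as
# lists of cell strings, keeping every 3rd row:
#   rows = get_data_from_table(soup.find_all('table')[1], 'string', 3)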
def get_fbref_big5(url):
soup = get_soup(url)
df = pd.read_html(str(soup))[0]
# column names - collapse the multiindex
col1 = list(df.columns.get_level_values(0))
col1 = ['' if c[:7]=='Unnamed' else c.replace(' ', '_').lower() for c in col1]
col2 = list(df.columns.get_level_values(1))
col2 = [c.replace(' ', '_').lower() for c in col2]
cols = [f'{c}_{col2[i]}' if c != '' else col2[i] for i, c in enumerate(col1)]
df.columns = cols
# remove lines that are the header row repeated
df = df[df.rk != 'Rk'].copy()
# add the url for the player profile and match logs
# https://stackoverflow.com/questions/42285417/how-to-preserve-links-when-scraping-a-table-with-beautiful-soup-and-pandas
parsed_table = soup.find_all('table')[0]
data = [[td.a['href'] if td.find('a') else ''.join(td.stripped_strings) for td in row.find_all('td')]
for row in parsed_table.find_all('tr')]
data = [d for d in data if len(d)!=0]
match_log = [d[-1] for d in data]
player_profile = [d[0] for d in data]
df['match_link'] = match_log
df['player_link'] = player_profile
# remove players who haven't played a minute from the playing time table
if 'playing_time_mp' in df.columns:
df = df[df.playing_time_mp != '0'].copy()
df.reset_index(drop=True, inplace=True)
df['rk'] = df.index + 1
# drop the matches column
df.drop('matches', axis='columns', inplace=True)
# columns to numeric columns
df[df.columns[6:-2]] = df[df.columns[6:-2]].apply(pd.to_numeric, errors='coerce', axis='columns')
return df
def get_fbref_player_dob(url):
soup = get_soup(url)
info = soup.findAll("div", {"class": "players"})[0]
squad = [p for p in info.find_all('p') if 'Club' in p.getText()]
if len(squad) == 0:
squad = None
else:
squad = squad[0].find('a').contents[0]
if info.find("span", itemprop="birthDate"):
dob = pd.to_datetime(info.find("span", itemprop="birthDate").contents[0].strip())
else:
dob = pd.to_datetime(np.nan)
info = BeautifulSoup(str(info)[:str(info).find('Position')], 'html.parser')
name = info.find('p').getText()
if name == '':
name = info.find('span').getText()
return name, dob, squad
def get_tm_team_league(soup):
"""Get the team name and league from a team page."""
team_name, league = soup.find("meta", attrs={'name':'keywords'})['content'].split(',')[:2]
return team_name, league
def get_tm_team_links(url):
"""Get links for each team from the league page."""
soup = get_soup(url)
table = soup.find_all('table')[3]
links = table.find_all('a', class_='vereinprofil_tooltip')
links = [l['href'] for l in links]
links = list(set(links))
links = [f'https://www.transfermarkt.com{l}' for l in links]
return links
def get_tm_team_squad(url):
""" Get the team squad from a team url."""
soup = get_soup(url)
team_name, league = get_tm_team_league(soup)
table = soup.find_all('table')[1]
data = get_data_from_table(table, 'string', 3)
# format data as a dataframe
df = pd.DataFrame(data)
if len(df.columns) == 14:
df = df.drop([2, 6, 7, 11], axis=1)
else:
df = df.drop([2, 6, 10], axis=1)
df.columns = ['number', 'transfer_details', 'player', 'position', 'dob_age', 'height', 'foot',
'joined', 'contract_expires', 'market_value']
df['team_name'] = team_name
df['league'] = league
# get the links and add to the dataframe
data2 = get_data_from_table(table, 'link', 3)
player_links = [d[3] for d in data2]
signed_from = [d[10] for d in data2]
df['player_link'] = player_links
df['signed_from_link'] = signed_from
return df
def get_tm_arrivals_and_departures(url):
"""Get the team arrivals and departures from a team transfer page."""
soup = get_soup(url)
team_name, league = get_tm_team_league(soup)
# get data from tables
tables = soup.find_all('table')
# arrivals
arrival_table = tables[2]
arrival_data = get_data_from_table(arrival_table, 'string', 5)
arrival_data_links = get_data_from_table(arrival_table, 'link', 5)
# departures
departure_table = [t for t in tables if 'Joined' in str(t)][0]
departure_data = get_data_from_table(departure_table, 'string', 5)
departure_data_links = get_data_from_table(departure_table, 'link', 5)
# arrival dataframe
df_arrival = pd.DataFrame(arrival_data[:-1])
df_arrival.drop([0, 1, 2, 7, 8, 9], axis='columns', inplace=True)
df_arrival.columns = ['player', 'pos', 'age', 'market_value', 'left', 'left_league', 'fee']
arrival_player_links = [d[1] for d in arrival_data_links[:-1]]
df_arrival['player_link'] = arrival_player_links
df_arrival['joined'] = team_name
df_arrival['joined_league'] = league
# departure dataframe
df_departure = pd.DataFrame(departure_data[:-1])
df_departure.drop([0, 1, 2, 7, 8, 9], axis='columns', inplace=True)
df_departure.columns = ['player', 'pos', 'age', 'market_value', 'joined', 'joined_league', 'fee']
departure_player_links = [d[1] for d in departure_data_links[:-1]]
df_departure['player_link'] = departure_player_links
df_departure['left'] = team_name
df_departure['left_league'] = league
df = | pd.concat([df_arrival, df_departure]) | pandas.concat |
import pandas as pd
def model(buffer):
df = | pd.DataFrame.from_dict(buffer) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 12:52:53 2020
This script plots the boxplots of the distributions
@author: acn980
"""
import os, glob, sys
import pandas as pd
import numpy as np
import warnings
import matplotlib.pyplot as plt
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import detrend_fft, remove_NaN_skew, extract_MM
from matplotlib.ticker import AutoMinorLocator
warnings.filterwarnings("ignore")
#%%
save = False
fn_trunk = 'E:/surfdrive/Documents'
fn = os.path.join(fn_trunk, 'Master2019\Thomas\data\matlab_csv')
fn_files = 'Master2019/Thomas/data'
fn2 = os.path.join(fn_trunk,fn_files)
lag_joint = 0 #days
#%% GESLA OR WACC
fn_tide = os.path.join(fn,'Tide_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: pd.datetime.strptime(x, "%d-%m-%Y %H:%M:%S")
tide = pd.read_csv(fn_tide, parse_dates = True, date_parser= date_parser, index_col = 'Date')
tide.rename(columns = {tide.columns[0]:'tide'}, inplace = True)
all_tide = tide.resample('M').max()
tide_day = tide.resample('D').max()
# Importing monthly data - rainfall
allfiles = glob.glob(os.path.join(fn2, 'NewRain\TRENDS\MONTH_CORRECTED', 'Thiessen_*.csv'))
all_rain = pd.DataFrame(data=None)
for file in allfiles:
month = pd.read_csv(file, index_col = 'Year', parse_dates=True)
month.rename(columns={month.columns[0]:'Thiessen'}, inplace = True)
all_rain = pd.concat([all_rain, month], axis = 0)
#Importing the monthly data surge
allfiles = glob.glob(os.path.join(fn2, 'NewSurge\TRENDS\MONTH_RAW', 'skew_fft_*.csv'))
all_skew = pd.DataFrame(data=None)
for file in allfiles:
month = pd.read_csv(file, index_col = 'Year', parse_dates=True)
all_skew = pd.concat([all_skew, month], axis = 0)
fn_skew = os.path.join(fn,'skew_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
skew = | pd.read_csv(fn_skew, parse_dates = True, date_parser= date_parser, index_col = 'Date') | pandas.read_csv |
import pandas as pd
import os
import time
try:from ethnicolr import census_ln, pred_census_ln,pred_wiki_name,pred_fl_reg_name
except: os.system('pip install ethnicolr')
import seaborn as sns
import matplotlib.pylab as plt
import scipy
from itertools import permutations
import numpy as np
import matplotlib.gridspec as gridspec
from igraph import VertexClustering
from itertools import combinations
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.sans-serif'] = "Palatino"
plt.rcParams['font.serif'] = "Palatino"
plt.rcParams['mathtext.fontset'] = 'custom'
plt.rcParams['mathtext.it'] = 'Palatino:italic'
plt.rcParams['mathtext.bf'] = 'Palatino:bold'
plt.rcParams['mathtext.cal'] = 'Palatino'
from matplotlib.ticker import FormatStrFormatter
from matplotlib import ticker
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.neural_network import MLPClassifier,MLPRegressor
from sklearn.linear_model import RidgeClassifierCV
from sklearn.multioutput import MultiOutputRegressor
from sklearn.linear_model import RidgeCV
from sklearn.decomposition import PCA
from statsmodels.stats.multitest import multipletests
import multiprocessing
from multiprocessing import Pool
import tqdm
import igraph
from scipy.stats import pearsonr
global paper_df
global main_df
global g
global graphs
global pal
global homedir
global method
global node_2_a
global a_2_node
global a_2_paper
global control
global matrix_idxs
global prs
# matrix_idxs = {'white_M':0,'white_W':1,'white_U':2,'api_M':3,'api_W':4,'api_U':5,'hispanic_M':6,'hispanic_W':7,'hispanic_U':8,'black_M':9,'black_W':10,'black_U':11}
pal = np.array([[72,61,139],[82,139,139],[180,205,205],[205,129,98]])/255.
# global us_only
# us_only = True
"""
AF = author names, with the format LastName, FirstName; LastName, FirstName; etc..
SO = journal
DT = document type (review or article)
CR = reference list
TC = total citations received (at time of downloading about a year ago)
PD = month of publication
PY = year of publication
DI = DOI
"""
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
return v
parser = argparse.ArgumentParser()
parser.add_argument('-homedir',action='store',dest='homedir',default='/Users/maxwell/Dropbox/Bertolero_Bassett_Projects/citations/')
parser.add_argument('-method',action='store',dest='method',default='wiki')
parser.add_argument('-continent',type=str2bool,action='store',dest='continent',default=False)
parser.add_argument('-continent_only',type=str2bool,action='store',dest='continent_only',default=False)
parser.add_argument('-control',type=str2bool,action='store',dest='control',default=False)
parser.add_argument('-within_poc',type=str2bool,action='store',dest='within_poc',default=False)
parser.add_argument('-walk_length',type=str,action='store',dest='walk_length',default='cited')
parser.add_argument('-walk_papers',type=str2bool,action='store',dest='walk_papers',default=False)
r = parser.parse_args()
locals().update(r.__dict__)
globals().update(r.__dict__)
wiki_2_race = {"Asian,GreaterEastAsian,EastAsian":'api', "Asian,GreaterEastAsian,Japanese":'api',
"Asian,IndianSubContinent":'api', "GreaterAfrican,Africans":'black', "GreaterAfrican,Muslim":'black',
"GreaterEuropean,British":'white', "GreaterEuropean,EastEuropean":'white',
"GreaterEuropean,Jewish":'white', "GreaterEuropean,WestEuropean,French":'white',
"GreaterEuropean,WestEuropean,Germanic":'white', "GreaterEuropean,WestEuropean,Hispanic":'hispanic',
"GreaterEuropean,WestEuropean,Italian":'white', "GreaterEuropean,WestEuropean,Nordic":'white'}
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
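# Layout assumed throughout: indices 0-3 are men (white, Asian, Hispanic, Black)
# and 4-7 are women in the same race order; in the per-paper 8x8 matrices built
# below, rows index the first-author category and columns the last-author category.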
def log_p_value(p):
if p == 0.0:
p = "-log10($\it{p}$)>250"
elif p > 0.001:
p = np.around(p,3)
p = "$\it{p}$=%s"%(p)
else:
p = (-1) * np.log10(p)
p = "-log10($\it{p}$)=%s"%(np.around(p,0).astype(int))
return p
def convert_r_p(r,p):
return "$\it{r}$=%s\n%s"%(np.around(r,2),log_p_value(p))
def nan_pearsonr(x,y):
xmask = np.isnan(x)
ymask = np.isnan(y)
mask = (xmask==False) & (ymask==False)
return pearsonr(x[mask],y[mask])
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
def make_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
# if os.path.exists('/%s/data/result_df_%s.csv'%(homedir,method)):
# df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
# return df
main_df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
result_df = pd.DataFrame(columns=['fa_race','la_race','citation_count'])
store_fa_race = []
store_la_race = []
store_citations = []
store_year = []
store_journal = []
store_fa_g = []
store_la_g = []
store_fa_category = []
store_la_category = []
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
store_year.append(entry[1]['PY'])
store_journal.append(entry[1]['SO'])
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
try:store_citations.append(len(entry[1].cited.split(',')))
except:store_citations.append(0)
##wiki
if method =='wiki':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_fl_reg_name(fa_df,'lname','fname').race.values[0].split('_')[-1]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_fl_reg_name(la_df,'lname','fname').race.values[0].split('_')[-1]
#census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race,la_race= r.race.values
if method =='combined':
##wiki
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = wiki_2_race[pred_wiki_name(fa_df,'lname','fname').race.values[0]]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = wiki_2_race[pred_wiki_name(la_df,'lname','fname').race.values[0]]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census,la_race_census= r.race.values
if la_race_census != la_race_wiki:
if la_race_wiki == 'white':
la_race = la_race_census
if la_race_census == 'white':
la_race = la_race_wiki
elif (la_race_census != 'white') & (la_race_wiki != 'white'): la_race = la_race_wiki
elif la_race_census == la_race_wiki: la_race = la_race_wiki
if fa_race_census != fa_race_wiki:
if fa_race_wiki == 'white':
fa_race = fa_race_census
if fa_race_census == 'white':
fa_race = fa_race_wiki
elif (fa_race_census != 'white') & (fa_race_wiki != 'white'): fa_race = fa_race_wiki
elif fa_race_census == fa_race_wiki: fa_race = fa_race_wiki
store_la_race.append(la_race)
store_fa_race.append(fa_race)
store_fa_g.append(entry[1].AG[0])
store_la_g.append(entry[1].AG[1])
store_fa_category.append('%s_%s' %(fa_race,entry[1].AG[0]))
store_la_category.append('%s_%s' %(la_race,entry[1].AG[1]))
result_df['fa_race'] = store_fa_race
result_df['la_race'] = store_la_race
result_df['fa_g'] = store_fa_g
result_df['la_g'] = store_la_g
result_df['journal'] = store_journal
result_df['year'] = store_year
result_df['citation_count'] = store_citations
result_df['fa_category'] = store_fa_category
result_df['la_category'] = store_la_category
# result_df.citation_count = result_df.citation_count.values.astype(int)
result_df.to_csv('/%s/data/result_df_%s.csv'%(homedir,method),index=False)
return result_df
def make_pr_df(method=method):
"""
this makes the actual data by pulling the race from the census or wiki data
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.zeros((main_df.shape[0],8,8))
gender_base = {}
for year in np.unique(main_df.PY.values):
ydf = main_df[main_df.PY==year].AG
fa = np.array([x[0] for x in ydf.values])
la = np.array([x[1] for x in ydf.values])
fa_m = len(fa[fa=='M'])/ len(fa[fa!='U'])
fa_w = len(fa[fa=='W'])/ len(fa[fa!='U'])
la_m = len(la[la=='M'])/ len(la[la!='U'])
la_w = len(la[la=='W'])/ len(la[la!='U'])
gender_base[year] = [fa_m,fa_w,la_m,la_w]
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
if method =='wiki_black':
black = [3]
for entry in tqdm.tqdm(main_df.iterrows(),total=len(main_df)):
fa = entry[1].AF.split(';')[0]
la = entry[1].AF.split(';')[-1]
fa_lname,fa_fname = fa.split(', ')
la_lname,la_fname = la.split(', ')
fa_g = entry[1].AG[0]
la_g = entry[1].AG[1]
paper_matrix = np.zeros((2,8))
# 1/0
##wiki
if method =='wiki' or method == 'wiki_black':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
fa_race = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race = [np.sum(fa_race[white]),np.sum(fa_race[asian]),np.sum(fa_race[hispanic]),np.sum(fa_race[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
la_race = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race = [np.sum(la_race[white]),np.sum(la_race[asian]),np.sum(la_race[hispanic]),np.sum(la_race[black])]
# #census
if method =='census':
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if method =='florida':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(fa_df,'lname','fname').values[0][3:]
fa_race = [white,asian,hispanic,black]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['lname','fname'])
asian, hispanic, black, white = pred_fl_reg_name(la_df,'lname','fname').values[0][3:]
la_race = [white,asian,hispanic,black]
if method == 'combined':
names = [{'lname': fa_lname,'fname':fa_fname}]
fa_df = pd.DataFrame(names,columns=['fname','lname'])
fa_race_wiki = pred_wiki_name(fa_df,'lname','fname').values[0][3:]
fa_race_wiki = [np.sum(fa_race_wiki[white]),np.sum(fa_race_wiki[asian]),np.sum(fa_race_wiki[hispanic]),np.sum(fa_race_wiki[black])]
names = [{'lname': la_lname,'fname':la_fname}]
la_df = pd.DataFrame(names,columns=['fname','lname'])
la_race_wiki = pred_wiki_name(la_df,'lname','fname').values[0][3:]
la_race_wiki = [np.sum(la_race_wiki[white]),np.sum(la_race_wiki[asian]),np.sum(la_race_wiki[hispanic]),np.sum(la_race_wiki[black])]
names = [{'name': fa_lname},{'name':la_lname}]
la_df = pd.DataFrame(names)
r = pred_census_ln(la_df,'name')
fa_race_census = [r.iloc[0]['white'],r.iloc[0]['api'],r.iloc[0]['hispanic'],r.iloc[0]['black']]
la_race_census = [r.iloc[1]['white'],r.iloc[1]['api'],r.iloc[1]['hispanic'],r.iloc[1]['black']]
if fa_race_census[0] < fa_race_wiki[0]: fa_race = fa_race_census
else: fa_race = fa_race_wiki
if la_race_census[0] < la_race_wiki[0]: la_race = la_race_census
else: la_race = la_race_wiki
gender_b = gender_base[year]
if fa_g == 'M': paper_matrix[0] = np.outer([1,0],fa_race).flatten()
if fa_g == 'W': paper_matrix[0] = np.outer([0,1],fa_race).flatten()
if fa_g == 'U': paper_matrix[0] = np.outer([gender_b[0],gender_b[1]],fa_race).flatten()
if la_g == 'M': paper_matrix[1] = np.outer([1,0],la_race).flatten()
if la_g == 'W': paper_matrix[1] = np.outer([0,1],la_race).flatten()
if la_g == 'U': paper_matrix[1] = np.outer([gender_b[2],gender_b[3]],la_race).flatten()
paper_matrix = np.outer(paper_matrix[0],paper_matrix[1])
paper_matrix = paper_matrix / np.sum(paper_matrix)
prs[entry[0]] = paper_matrix
np.save('/%s/data/result_pr_df_%s.npy'%(homedir,method),prs)
def make_all_author_race():
"""
this makes the actual data by pulling the race from the census or wiki data,
but this version includes middle authors, which we use for the co-authorship networks
"""
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append(a)
df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
df = df.drop_duplicates('name')
if method =='florida':
# 1/0
r = pred_fl_reg_name(df,'lname','fname')
r = r.rename(columns={'nh_black':'black','nh_white':'white'})
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
if method =='census':
r = pred_census_ln(df,'lname')
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
r = dict(zip(df.name.values,df.race.values))
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
a_lname,a_fname = a.split(', ')
races.append(r[a_lname.strip()])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
race2wiki = {'api': ["Asian,GreaterEastAsian,EastAsian","Asian,GreaterEastAsian,Japanese", "Asian,IndianSubContinent"],
'black':["GreaterAfrican,Africans", "GreaterAfrican,Muslim"],
'white':["GreaterEuropean,British", "GreaterEuropean,EastEuropean", "GreaterEuropean,Jewish", "GreaterEuropean,WestEuropean,French",
"GreaterEuropean,WestEuropean,Germanic", "GreaterEuropean,WestEuropean,Nordic", "GreaterEuropean,WestEuropean,Italian"],
'hispanic':["GreaterEuropean,WestEuropean,Hispanic"]}
if method =='wiki':
r = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r[race] = 0.0
for e in race2wiki[race]:
r[race] = r[race] + r[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r = r.drop(columns=[e])
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
all_races = []
for idx,paper in tqdm.tqdm(main_df.iterrows(),total=main_df.shape[0]):
races = []
for a in paper.AF.split('; '):
races.append(r[r.name==a].race.values[0])
all_races.append('_'.join(str(x) for x in races))
main_df['all_races'] = all_races
main_df.to_csv('/%s/data/all_data_%s.csv'%(homedir,method),index=False)
if method =='combined':
r_wiki = pred_wiki_name(df,'lname','fname')
for race in ['api','black','hispanic','white']:
r_wiki[race] = 0.0
for e in race2wiki[race]:
r_wiki[race] = r_wiki[race] + r_wiki[e]
for race in ['api','black','hispanic','white']:
for e in race2wiki[race]:
r_wiki = r_wiki.drop(columns=[e])
r_census = pred_census_ln(df,'lname')
census = r_census.white < r_wiki.white
wiki = r_census.white > r_wiki.white
r = r_census.copy()
r[census] = r_census
r[wiki] = r_wiki
r.to_csv('/%s/data/result_df_%s_all.csv'%(homedir,method),index=False)
def figure_1_pr_authors():
df = pd.read_csv('/%s/data/result_df_%s_all.csv'%(homedir,method))
paper_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
results = []
for year in np.unique(paper_df.PY.values):
print (year)
ydf = paper_df[paper_df.PY==year]
names = []
for p in ydf.iterrows():
for n in p[1].AF.split(';'):
names.append(n.strip())
names = np.unique(names)
result = np.zeros((len(names),4))
for idx,name in enumerate(names):
try:result[idx] = df[df.name==name].values[0][-4:]
except:result[idx] = np.nan
results.append(np.nansum(result,axis=0))
results = np.array(results)
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 14, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:7])
ax1_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(results.transpose()[[3,0,2,1]],axis=0), labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'black')
plt.margins(0,0)
plt.ylabel('sum of predicted author race')
plt.xlabel('publication year')
ax1.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
# 1/0
ax2 = fig.add_subplot(gs[:15,8:])
ax2_plot = plt.stackplot(np.unique(paper_df.PY),np.flip(np.divide(results.transpose()[[3,0,2,1]],np.sum(results,axis=1)),axis=0)*100, labels=['Black','Hispanic','Asian','White'],colors=np.flip(pal,axis=0),alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=2,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'white')
plt.margins(0,0)
plt.ylabel('percentage of predicted author race',labelpad=-5)
plt.xlabel('publication year')
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('b',{'fontweight':'bold'},'left',pad=2)
plt.savefig('authors.pdf')
def figure_1_pr():
n_iters = 1000
df =pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0).rename({'PY':'year','SO':'journal'},axis='columns')
matrix = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
results = np.zeros((len(np.unique(df.year)),4))
if within_poc == False:
labels = ['white author & white author','white author & author of color','author of color & white author','author of color &\nauthor of color']
groups = [np.vectorize(matrix_idxs.get)(['white_M','white_W',]),
np.vectorize(matrix_idxs.get)(['api_M','api_W','hispanic_M','hispanic_W','black_M','black_W',])]
names = ['white-white','white-poc','poc-white','poc-poc']
plot_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
plot_base_matrix = np.zeros((matrix.shape[0],len(groups),len(groups)))
for i in range(len(groups)):
for j in range(len(groups)):
plot_matrix[:,i,j] = np.nansum(matrix[:,groups[i]][:,:,groups[j]].reshape(matrix.shape[0],-1),axis=1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
if within_poc == True:
names = ['white author','Asian author','Hispanic author','Black author']
groups = [[0,4],[1,5],[2,6],[3,7]]
labels = names
plot_matrix = np.zeros((matrix.shape[0],len(groups)))
for i in range(4):
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,groups[i],:],axis=-1),axis=-1)
plot_matrix[:,i] = plot_matrix[:,i] + np.nansum(np.nanmean(matrix[:,:,groups[i]],axis=-1),axis=-1)
for yidx,year in enumerate(np.unique(df.year)):
papers = df[df.year==year].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
plt.close()
sns.set(style='white',font='Palatino')
# pal = sns.color_palette("Set2")
# pal = sns.color_palette("vlag",4)
fig = plt.figure(figsize=(7.5,4),constrained_layout=False)
gs = gridspec.GridSpec(15, 16, figure=fig,wspace=.75,hspace=0,left=.1,right=.9,top=.9,bottom=.1)
ax1 = fig.add_subplot(gs[:15,:5])
plt.sca(ax1)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels),colors=np.flip(pal,axis=0), alpha=1)
handles, labels = plt.gca().get_legend_handles_labels()
labels.reverse()
handles.reverse()
leg = plt.legend(loc=9,frameon=False,labels=labels,handles=handles,fontsize=8)
for text in leg.get_texts():
plt.setp(text, color = 'w')
plt.margins(0,0)
plt.ylabel('percentage of publications')
plt.xlabel('publication year')
ax1.tick_params(axis='x', which='major', pad=-1)
ax1.tick_params(axis='y', which='major', pad=0)
i,j,k,l = np.flip(results[0]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
# i,j,k,l = np.array([100]) - np.array([i,j,k,l])
plt.sca(ax1)
ax1.yaxis.set_major_formatter(ticker.PercentFormatter())
ax1.set_yticks([i,j,k,l])
ax1.set_yticklabels(np.flip(np.around(results[0]*100,0).astype(int)))
ax2 = ax1_plot[0].axes.twinx()
plt.sca(ax2)
i,j,k,l = np.flip(results[-1]*100)
i,j,k,l = [i,(i+j),(i+j+k),(i+j+k+l)]
i,j,k,l = [np.mean([0,i]),np.mean([i,j]),np.mean([j,k]),np.mean([k,l])]
plt.ylim(0,100)
ax2.yaxis.set_major_formatter(ticker.PercentFormatter())
ax2.set_yticks([i,j,k,l])
ax2.set_yticklabels(np.flip(np.around(results[-1]*100,0)).astype(int))
plt.xticks([1995., 2000., 2005., 2010., 2015., 2019],np.array([1995., 2000., 2005., 2010., 2015., 2019]).astype(int))
ax2.tick_params(axis='y', which='major', pad=0)
plt.title('a',{'fontweight':'bold'},'left',pad=2)
plot_df = pd.DataFrame(columns=['year','percentage','iteration'])
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([r,year,i]).reshape(1,-1),columns=['percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
a = plot_df[(plot_df.iteration==i)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = mean_confidence_interval(pct_df.percentage)
ci = np.around(ci,2)
print ("Across 1000 bootstraps, the mean percent increase per year was %s%% (95 CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]))
plt.text(.5,.48,"Increasing at %s%% per year\n(95%% CI:%s%%,%s%%)"%(ci[0],ci[1],ci[2]),{'fontsize':8,'color':'white'},horizontalalignment='center',verticalalignment='bottom',rotation=9,transform=ax2.transAxes)
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,6:10]))
jidx=jidx+3
for aidx,journal in enumerate(np.unique(df.journal)):
ax = axes[aidx]
plt.sca(ax)
if aidx == 2: ax.set_ylabel('percentage of publications')
if aidx == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
results = np.zeros(( len(np.unique(df[(df.journal==journal)].year)),4))
for yidx,year in enumerate(np.unique(df[(df.journal==journal)].year)):
papers = df[(df.year==year)&(df.journal==journal)].index
r = np.mean(plot_matrix[papers],axis=0).flatten()
results[yidx,0] = r[0]
results[yidx,1] = r[1]
results[yidx,2] = r[2]
results[yidx,3] = r[3]
data = df[df.journal==journal]
if journal == 'NATURE NEUROSCIENCE':
for i in range(3): results = np.concatenate([[[0,0,0,0]],results],axis=0)
ax1_plot = plt.stackplot(np.unique(df.year),np.flip(results.transpose(),axis=0)*100, labels=np.flip(labels,axis=0),colors=np.flip(pal,axis=0), alpha=1)
plt.margins(0,0)
ax.set_yticks([])
if aidx != 4:
ax.set_xticks([])
else: plt.xticks(np.array([1996.5,2017.5]),np.array([1995.,2019]).astype(int))
plt.title(journal.title(), pad=-10,color='w',fontsize=8)
if aidx == 0: plt.text(0,1,'b',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
journals = np.unique(df.journal)
plot_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
for j in journals:
for yidx,year in enumerate(np.unique(df.year)):
for i in range(n_iters):
data = df[(df.year==year)&(df.journal==j)]
papers = data.sample(int(len(data)),replace=True).index
r = np.mean(plot_matrix[papers],axis=0).flatten()
total = r.sum()
r = np.array(r[1:])/total
r = r.sum()
tdf = pd.DataFrame(np.array([j,r,year,i]).reshape(1,-1),columns=['journal','percentage','year','iteration'])
plot_df = plot_df.append(tdf,ignore_index=True)
plot_df.percentage = plot_df.percentage.astype(float)
plot_df.iteration = plot_df.iteration.astype(int)
plot_df.percentage = plot_df.percentage.astype(float) * 100
pct_df = pd.DataFrame(columns=['journal','year','percentage','iteration'])
plot_df = plot_df.sort_values('year')
for i in range(n_iters):
for j in journals:
a = plot_df[(plot_df.iteration==i)&(plot_df.journal==j)].percentage.values
# change = np.diff(a) / a[:-1] * 100.
change = np.diff(a)
tdf = pd.DataFrame(columns=['journal','year','percentage','iteration'])
tdf.year = range(1997,2020)
tdf.percentage = change[1:]
tdf.journal = j
tdf.iteration = i
pct_df = pct_df.append(tdf,ignore_index=True)
pct_df = pct_df.dropna()
pct_df = pct_df[np.isinf(pct_df.percentage)==False]
ci = pct_df.groupby(['journal']).percentage.agg(mean_confidence_interval).values
axes = []
jidx = 3
for makea in range(5):
axes.append(fig.add_subplot(gs[jidx-3:jidx,11:]))
jidx=jidx+3
for i,ax,journal,color in zip(range(5),axes,journals,sns.color_palette("rocket_r", 5)):
plt.sca(ax)
ax.clear()
#
# plot_df[np.isnan(plot_df.percentage)] = 0.0
if i == 0: plt.text(0,1,'c',{'fontweight':'bold'},horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
lp = sns.lineplot(data=plot_df[plot_df.journal==journal],y='percentage',x='year',color=color,ci='sd')
plt.margins(0,0)
thisdf = plot_df[plot_df.journal==journal]
minp = int(np.around(thisdf.mean()['percentage'],0))
thisdf = thisdf[thisdf.year==thisdf.year.max()]
maxp = int(np.around(thisdf.mean()['percentage'],0))
plt.text(-0.01,.5,'%s'%(minp),horizontalalignment='right',verticalalignment='top', transform=ax.transAxes,fontsize=10)
plt.text(1.01,.9,'%s'%(maxp),horizontalalignment='left',verticalalignment='top', transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
# ax.set_xticks([])
ax.set_ylabel('')
plt.margins(0,0)
ax.set_yticks([])
if i == 2:
ax.set_ylabel('percentage of publications',labelpad=12)
if i != 4: ax.set_xticks([])
else: plt.xticks(np.array([1.5,22.5]),np.array([1995.,2019]).astype(int))
mean_pc,min_pc,max_pc = np.around(ci[i],2)
if i == 4: ax.set_xlabel('publication\nyear',labelpad=-10)
else: ax.set_xlabel('')
plt.text(.99,0,'95%' + "CI: %s<%s<%s"%(min_pc,mean_pc,max_pc),horizontalalignment='right',verticalalignment='bottom', transform=ax.transAxes,fontsize=8)
if journal == 'NATURE NEUROSCIENCE':
plt.xlim(-3,21)
plt.savefig('/%s/figures/figure1_pr_%s_%s.pdf'%(homedir,method,within_poc))
def validate():
black_names = pd.read_csv('%s/data/Black scientists - Faculty.csv'%(homedir))['Name'].values[1:]
fnames = []
lnames = []
all_names =[]
for n in black_names:
try:
fn,la = n.split(' ')[:2]
fnames.append(fn.strip())
lnames.append(la.strip())
all_names.append('%s_%s'%(fn.strip(),la.strip()))
except:continue
black_df = pd.DataFrame(np.array([all_names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
names = []
lnames = []
fnames = []
for entry in main_df.iterrows():
for a in entry[1].AF.split('; '):
a_lname,a_fname = a.split(', ')
lnames.append(a_lname.strip())
fnames.append(a_fname.strip())
names.append('%s_%s'%(a_fname,a_lname))
main_df = pd.DataFrame(np.array([names,fnames,lnames]).swapaxes(0,1),columns=['name','fname','lname'])
main_df = main_df.drop_duplicates('name')
if method == 'wiki':
black_r = pred_wiki_name(black_df,'lname','fname')
all_r = pred_wiki_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,4:][:,black].sum(axis=1)
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,4:][:,black].sum(axis=1)
black_df['sample'] = 'Black-in-STEM'
if method == 'florida':
black_r = pred_fl_reg_name(black_df,'lname','fname')
all_r = pred_fl_reg_name(main_df,'lname','fname')
asian = [0,1,2]
black = [3,4]
white = [5,6,7,8,9,11,12]
hispanic = [10]
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-2]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-2]
black_df['sample'] = 'Black-in-STEM'
if method == 'census':
black_r = pred_census_ln(black_df,'lname')
all_r = pred_census_ln(main_df,'lname')
all_df = pd.DataFrame(columns=['probability','sample'])
all_df['probability'] = all_r.values[:,-3]
all_df['sample'] = 'papers'
black_df = pd.DataFrame(columns=['probability','sample'])
black_df['probability'] = black_r.values[:,-3]
black_df['sample'] = 'Black-in-STEM'
data = all_df.append(black_df,ignore_index=True)
data.probability = data.probability.astype(float)
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data,x='probability',hue="sample",stat='density',common_norm=False,bins=20)
plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_%s.pdf'%(method))
plt.close()
sns.set(style='white',font='Palatino')
fig = plt.figure(figsize=(7.5,3),constrained_layout=True)
gs = gridspec.GridSpec(6,6, figure=fig)
ax1 = fig.add_subplot(gs[:6,:3])
plt.sca(ax1)
sns.histplot(data=data[data['sample']=='papers'],x='probability',stat='density',common_norm=False,bins=20)
ax2 = fig.add_subplot(gs[:6,3:])
plt.sca(ax2)
sns.histplot(data=data[data['sample']=='Black-in-STEM'],x='probability',hue="sample",stat='density',common_norm=False,bins=20)
# plt.ylim(0,2.5)
plt.savefig('Black-in-STEM_2.pdf')
def make_pr_control():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
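# Sketch of the adjustment below: the covariates above are one-hot encoded and a
# multi-output ridge regression (alpha chosen by RidgeCV) maps them onto the 64
# entries of each paper's 8x8 author-category matrix; predictions are then
# renormalized to sum to 1 and saved as the controlled expectation.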
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s.npy'%(homedir,method),ridge_probabilities)
def make_pr_control_jn():
"""
control for features of citing article
"""
# 1) the year of publication
# 2) the journal in which it was published
# 3) the number of authors
# 4) whether the paper was a review article
# 5) the seniority of the paper’s first and last authors.
# 6) paper location
# 6) paper sub-field
df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
cont = pd.read_csv('/%s/article_data/CountryAndContData.csv'%(homedir))
df = df.merge(cont,how='outer',left_index=True, right_index=True)
df = df.merge(pd.read_csv('/%s/article_data/SeniorityData.csv'%(homedir)),left_index=True, right_index=True)
df = df.rename(columns={'DI':'doi'})
df['category'] = 'none'
sub = pd.read_csv('/%s/article_data/JoNcategories_no2019.csv'%(homedir))
for cat,doi in zip(sub.category,sub.doi):
df.iloc[np.where(df.doi==doi)[0],-1] = cat
reg_df = pd.DataFrame(columns=['year','n_authors','journal','paper_type','senior','location','category'])
for entry in tqdm.tqdm(df.iterrows(),total=len(df)):
idx = entry[0]
paper = entry[1]
year = entry[1].PY
n_authors = len(paper.AF.split(';'))
journal = entry[1].SO
paper_type = paper.DT
senior = entry[1].V4
cat = entry[1].category
try: loc = entry[1]['FirstListed.Cont'].split()[0]
except: loc = 'None'
reg_df.loc[len(reg_df)] = [year,n_authors,journal,paper_type,senior,loc,cat]
reg_df["n_authors"] = pd.to_numeric(reg_df["n_authors"])
reg_df["year"] = pd.to_numeric(reg_df["year"])
reg_df["senior"] = pd.to_numeric(reg_df["senior"])
skl_df = pd.get_dummies(reg_df).values
ridge = MultiOutputRegressor(RidgeCV(alphas=[1e-5,1e-4,1e-3, 1e-2, 1e-1, 1,10,25,50,75,100])).fit(skl_df,prs.reshape(prs.shape[0],-1))
ridge_probabilities = ridge.predict(skl_df)
ridge_probabilities = np.divide((ridge_probabilities), np.sum(ridge_probabilities,axis=1).reshape(-1,1))
ridge_probabilities = ridge_probabilities.reshape(ridge_probabilities.shape[0],8,8)
np.save('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method),ridge_probabilities)
def write_matrix():
main_df = pd.read_csv('/%s/data/ArticleDataNew.csv'%(homedir))
prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
small_matrix = np.zeros((2,2))
matrix_idxs = {'white':0,'api':1,'hispanic':2,'black':3}
small_idxs = {'white':0,'api':1,'hispanic':1,'black':1}
for fa_r in ['white','api','hispanic','black']:
for la_r in ['white','api','hispanic','black']:
small_matrix[small_idxs[fa_r],small_idxs[la_r]] += np.sum(prs[:,matrix_idxs[fa_r],matrix_idxs[la_r]],axis=0)
np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.sum(prs,axis=0))
np.save('//Users/maxwell/Documents/GitHub/unbiasedciter/expected_small_matrix_%s.npy'%(method),small_matrix)
def convert_df():
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
df['cited'] = np.nan
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
self_cites = np.array(paper.SA.split(',')).astype(int)
try: cites = np.array(paper.CP.split(',')).astype(int)
except:
if np.isnan(paper.CP):
continue
cites = cites[np.isin(cites,self_cites) == False]
df.iloc[idx,-1] = ', '.join(cites.astype(str))
df.to_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir))
def make_pr_percentages(control):
print (control)
df = pd.read_csv('/%s/article_data/NewArticleData2019_filtered.csv'%(homedir),header=0)
citing_prs = np.load('/%s/data/result_pr_df_%s.npy'%(homedir,method))
if control == 'True_jn' or control == 'null_jn':
base_prs = np.load('/%s/data/probabilities_pr_%s_jn.npy'%(homedir,method))
if control == True: base_prs = np.load('/%s/data/probabilities_pr_%s.npy'%(homedir,method))
if control == 'null_True': base_prs = np.load('/%s/data/probabilities_pr_%s.npy'%(homedir,method))
if control == 'null_walk' or control == 'walk':
if walk_length == 'cited':
base_prs = np.load('/%s/data/walk_pr_probabilities_%s_cited.npy'%(homedir,method)).reshape(-1,8,8)
if walk_length[:3] == 'all':
base_prs = np.load('/%s/data/walk_pr_probabilities_%s_%s.npy'%(homedir,method,walk_length)).reshape(-1,8,8)
if type(control) != bool and control[:4] == 'null':
matrix = np.zeros((100,df.shape[0],8,8))
matrix[:] = np.nan
base_matrix = np.zeros((100,df.shape[0],8,8))
base_matrix[:] = np.nan
else:
matrix = np.zeros((df.shape[0],8,8))
matrix[:] = np.nan
base_matrix = np.zeros((df.shape[0],8,8))
base_matrix[:] = np.nan
if control == False:
year_df = pd.DataFrame(columns=['year','month','prs'])
citable_df = pd.DataFrame(columns=['year','month','index'])
for year in df.PY.unique():
if year < 2009:continue
for month in df.PD.unique():
rdf = df[(df.year<year) | ((df.year==year) & (df.PD<=month))]
this_base_matrix = citing_prs[rdf.index.values].mean(axis=0)
year_df = year_df.append(pd.DataFrame(np.array([year,month,this_base_matrix]).reshape(1,-1),columns=['year','month','prs']),ignore_index=True)
citable_df = citable_df.append(pd.DataFrame(np.array([year,month,rdf.index.values]).reshape(1,-1),columns=['year','month','index']),ignore_index=True)
if type(control) != bool and control[5:] == 'False':
year_df = pd.DataFrame(columns=['year','month','prs'])
citable_df = pd.DataFrame(columns=['year','month','index'])
for year in df.PY.unique():
if year < 2009:continue
for month in df.PD.unique():
rdf = df[(df.year<year) | ((df.year==year) & (df.PD<=month))]
this_base_matrix = citing_prs[rdf.index.values].mean(axis=0)
year_df = year_df.append(pd.DataFrame(np.array([year,month,this_base_matrix]).reshape(1,-1),columns=['year','month','prs']),ignore_index=True)
citable_df = citable_df.append(pd.DataFrame(np.array([year,month,rdf.index.values]).reshape(1,-1),columns=['year','month','index']),ignore_index=True)
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
#only look at papers published 2009 or later
year = paper.year
if year < 2009:continue
#only look at papers that cite at least 10 papers in our data
if type(paper.cited) != str:
if np.isnan(paper.cited)==True: continue
n_cites = len(paper['cited'].split(','))
if n_cites < 10: continue
if control == 'null_True' or control == 'null_jn':
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in base_prs[np.array(paper['cited'].split(',')).astype(int)-1]: #for each cited paper
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice = np.zeros((8,8))
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix
this_matrix.append(choice)
this_base_matrix = np.sum(this_base_matrix,axis=0)
this_matrix = np.sum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
elif control == 'null_False':
citable = citable_df[(citable_df['year']==year)&(citable_df.month==paper.PD)]['index'].values[0]
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in citing_prs[np.random.choice(citable,n_cites,False)]: #for each cited paper #for naive sampling random papers
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use naive base rate as base matrix
choice = np.zeros((8,8))
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix based on base rates
this_matrix.append(choice)
this_base_matrix = np.sum(this_base_matrix,axis=0)
this_matrix = np.sum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
elif control == 'null_walk':
for i in range(100):
this_base_matrix = []
this_matrix = []
for p in base_prs[np.array(paper['cited'].split(',')).astype(int)-1]: #for each cited paper
choice = np.zeros((8,8))
if np.isnan(p).any():
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice[:] = np.nan
this_matrix.append(choice)
continue
if np.min(p) < 0:p = p + abs(np.min(p))
p = p + abs(np.min(p))
p = p.flatten()/p.sum()
this_base_matrix.append(p.reshape((8,8))) #use model prs as base matrix
choice[np.unravel_index(np.random.choice(range(64),p=p),(8,8))] = 1 #and randomly assign race category as citation matrix
this_matrix.append(choice)
this_base_matrix = np.nansum(this_base_matrix,axis=0)
this_matrix = np.nansum(this_matrix,axis=0)
matrix[i,idx] = this_matrix
base_matrix[i,idx] = this_base_matrix
else:
this_matrix = citing_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == False:
this_base_matrix = year_df[(year_df.year==year) & (year_df.month==paper.PD)]['prs'].values[0] * n_cites
if control == True:
this_base_matrix = base_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == 'True_jn':
this_base_matrix = base_prs[np.array(paper['cited'].split(',')).astype(int)-1].sum(axis=0)
if control == 'walk':
this_base_matrix = np.nansum(base_prs[np.array(paper['cited'].split(',')).astype(int)-1],axis=0)
matrix[idx] = this_matrix
base_matrix[idx] = this_base_matrix
if type(control) == bool or control == 'True_jn':
np.save('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control),base_matrix)
elif control =='null_True' or control =='null_False' or control == 'null_jn':
np.save('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control),base_matrix)
else:
np.save('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length),matrix)
np.save('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length),base_matrix)
def self_citing(method):
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
df['self_cites'] = np.zeros((df.shape[0]))
for idx,paper in tqdm.tqdm(df.iterrows(),total=df.shape[0]):
#only look at papers published 2009 or later
year = paper.year
if year < 2009: continue
df.iloc[idx,-1] = len(paper.SA.split(','))
scipy.stats.ttest_ind(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites,df[(df.fa_race!='white')|(df.la_race!='white')].self_cites)
np.median(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites.values)
np.median(df[(df.fa_race!='white')|(df.la_race!='white')].self_cites.values)
np.mean(df[(df.fa_race=='white')&(df.la_race=='white')].self_cites.values)
np.mean(df[(df.fa_race!='white')|(df.la_race!='white')].self_cites.values)
def plot_pr_intersections(control,citing):
main_df = pd.read_csv('/%s/article_data/NewArticleData2019.csv'%(homedir),header=0)
race_df = pd.read_csv('/%s/data/result_df_%s.csv'%(homedir,method.split('_')[0]))
df = race_df.merge(main_df,how='outer',left_index=True, right_index=True)
n_iters = 1000
if type(control) == bool:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
elif control == 'all':
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,False))
base_matrix = []
for control_type in [True,False]: base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control_type)))
base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'walk','cited')))
base_matrix[0] = base_matrix[0] / np.nansum(base_matrix[0])
base_matrix[1] = base_matrix[1] / np.nansum(base_matrix[1])
base_matrix[2] = base_matrix[2] / np.nansum(base_matrix[2])
base_matrix = np.mean(base_matrix,axis=0)
else:
matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
# np.save('/Users/maxwell/Documents/GitHub/unbiasedciter/expected_matrix_%s.npy'%(method),np.mean(matrix,axis=0))
if type(control) == bool:
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',control))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',control))[0]
elif control == 'all':
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',False))
null_base = []
null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',True))[0])
null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'null',False))[0])
null_base.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null','walk','cited'))[0])
null_base = np.mean(null_base,axis=0)
else:
null = np.load('/%s/data/citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null',control,walk_length))
null_base = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s_%s.npy'%(homedir,method,'null',control,walk_length))[0]
boot_matrix = np.zeros((n_iters,8,8))
boot_r_matrix = np.zeros((n_iters,8,8))
ww_indices = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race=='white')].index
wa_indices = df[(df.year>=2009)&(df.fa_race=='white')&(df.la_race!='white')].index
aw_indices = df[(df.year>=2009)&(df.fa_race!='white')&(df.la_race=='white')].index
aa_indices = df[(df.year>=2009)&(df.fa_race!='white')&(df.la_race!='white')].index
black_indices = df[(df.year>=2009)&((df.fa_race=='black')|(df.la_race=='black'))].index
white_indices = df[(df.year>=2009)&((df.fa_race=='white')|(df.la_race=='white'))].index
hispanic_indices = df[(df.year>=2009)&((df.fa_race=='hispanic')|(df.la_race=='hispanic'))].index
api_indices = df[(df.year>=2009)&((df.fa_race=='api')|(df.la_race=='api'))].index
for b in range(n_iters):
if citing == 'all':
papers = np.random.choice(range(matrix.shape[0]),matrix.shape[0],replace=True)
if citing == 'ww':
papers = np.random.choice(ww_indices,ww_indices.shape[0],replace=True)
if citing == 'wa':
papers = np.random.choice(wa_indices,wa_indices.shape[0],replace=True)
if citing == 'aw':
papers = np.random.choice(aw_indices,aw_indices.shape[0],replace=True)
if citing == 'aa':
papers = np.random.choice(aa_indices,aa_indices.shape[0],replace=True)
if citing == 'black':
papers = np.random.choice(black_indices,black_indices.shape[0],replace=True)
if citing == 'hispanic':
papers = np.random.choice(hispanic_indices,hispanic_indices.shape[0],replace=True)
if citing == 'api':
papers = np.random.choice(api_indices,api_indices.shape[0],replace=True)
if citing == 'white':
papers = np.random.choice(white_indices,white_indices.shape[0],replace=True)
m = np.nansum(matrix[papers],axis=0)
m = m / np.sum(m)
e = np.nansum(base_matrix[papers],axis=0)
e = e / np.sum(e)
r = np.nansum(null[np.random.choice(100,1),papers],axis=0)
r = r / np.sum(r)
er = np.nansum(null_base[papers],axis=0)
er = er / np.sum(er)
rate = (m - e) / e
r_rate = (r - er) / er
boot_matrix[b] = rate
boot_r_matrix[b] = r_rate
# np.save('/%s/data/intersection_boot_matrix_%s.npy'%(homedir),boot_matrix,method)
p_matrix = np.zeros((8,8))
for i,j in combinations(range(8),2):
x = boot_matrix[:,i,j]
y = boot_r_matrix[:,i,j]
ay = abs(y)
ax = abs(x.mean())
p_matrix[i,j] = len(ay[ay>ax])
p_matrix = p_matrix / n_iters
multi_mask = multipletests(p_matrix.flatten(),0.05,'holm')[0].reshape(8,8)
names = ['white(m)','Asian(m)','Hispanic(m)','Black(m)','white(w)','Asian(w)','Hispanic(w)','Black(w)']
matrix_idxs = {'white(m)':0,'api(m)':1,'hispanic(m)':2,'black(m)':3,'white(w)':4,'api(w)':5,'hispanic(w)':6,'black(w)':7}
men_aoc = np.vectorize(matrix_idxs.get)(['api(m)','hispanic(m)','black(m)'])
women_aoc = np.vectorize(matrix_idxs.get)(['api(w)','hispanic(w)','black(w)'])
men_aoc = boot_matrix[:,men_aoc][:,:,men_aoc].flatten()
women_aoc = boot_matrix[:,women_aoc][:,:,women_aoc].flatten()
white_men = np.vectorize(matrix_idxs.get)(['white(m)'])
white_women = np.vectorize(matrix_idxs.get)(['white(w)'])
white_men= boot_matrix[:,white_men][:,:,white_men].flatten()
white_women = boot_matrix[:,white_women][:,:,white_women].flatten()
# def exact_mc_perm_test(xs, ys, nmc=10000):
# n, k = len(xs), 0
# diff = np.abs(np.mean(xs) - np.mean(ys))
# zs = np.concatenate([xs, ys])
# for j in range(nmc):
# np.random.shuffle(zs)
# k += diff <= np.abs(np.mean(zs[:n]) - np.mean(zs[n:]))
# return k / nmc
# p = exact_mc_perm_test(men_aoc,women_aoc)
# p = log_p_value(p)
def direction(d):
if d<=0: return 'less'
else: return 'greater'
diff = (men_aoc-women_aoc)
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
print (control)
if control == 'walk': print (walk_length)
print ('AoC men papers are cited at %s percentage points %s than women AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_men-men_aoc[:len(white_men)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
if control == 'walk': print (walk_length)
print ('white men papers are cited at %s percentage points %s than men AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_men-white_women)
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
print ('white men papers are cited at %s percentage points %s than white women papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_women-women_aoc[:len(white_women)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
print ('white women papers are cited at %s percentage points %s than women AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
diff = (white_women-men_aoc[:len(white_women)])
high,low = np.percentile(diff,97.5),np.percentile(diff,2.5)
low,high = np.around(low*100,2),np.around(high*100,2)
diff = np.around(diff.mean()*100,2)
if control == 'walk': print (walk_length)
print ('white women papers are cited at %s percentage points %s than men AoC papers, 95 percent CI=%s,%s'%(abs(diff),direction(diff),low,high))
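# The five comparisons above repeat the same "bootstrap difference ->
# percentile CI -> formatted report" pattern. The helper below is a minimal
# sketch of that pattern, added only for illustration (it is not part of the
# original analysis); it assumes `a` and `b` are equal-length bootstrap
# vectors of citation rates and reuses the `direction` helper defined above.
def report_bootstrap_diff(a, b, label_a, label_b):
    d = a - b
    low, high = np.percentile(d, 2.5), np.percentile(d, 97.5)
    low, high = np.around(low * 100, 2), np.around(high * 100, 2)
    mean_d = np.around(d.mean() * 100, 2)
    print('%s papers are cited at %s percentage points %s than %s papers, 95 percent CI=%s,%s'
          % (label_a, abs(mean_d), direction(mean_d), label_b, low, high))
# hypothetical call: report_bootstrap_diff(white_men, white_women, 'white men', 'white women')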
# 1/0
if type(control) == bool:
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
orig_base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control))
elif control == 'all':
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s.npy'%(homedir,method,False))
orig_base_matrix = []
for control_type in [True,False]:
orig_base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s.npy'%(homedir,method,control_type)))
orig_base_matrix.append(np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,'walk','cited')))
orig_base_matrix[0] = orig_base_matrix[0] / np.nansum(orig_base_matrix[0])
orig_base_matrix[1] = orig_base_matrix[1] / np.nansum(orig_base_matrix[1])
orig_base_matrix[2] = orig_base_matrix[2] / np.nansum(orig_base_matrix[2])
orig_base_matrix = np.mean(orig_base_matrix,axis=0)
else:
orig_matrix = np.load('/%s/data/citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
orig_base_matrix = np.load('/%s/data/base_citation_matrix_pr_%s_%s_%s.npy'%(homedir,method,control,walk_length))
matrix_idxs = {'white_M':0,'api_M':1,'hispanic_M':2,'black_M':3,'white_W':4,'api_W':5,'hispanic_W':6,'black_W':7}
df = | pd.DataFrame(columns=['bias type','bias amount','boot','race']) | pandas.DataFrame |
"""Plotting functions for linear models (broadly construed)."""
from __future__ import division
import copy
import itertools
import warnings
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
import statsmodels.api as sm
import statsmodels.formula.api as sf
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six import string_types
from .external.six.moves import range
from . import utils
from . import algorithms as algo
from .palettes import color_palette
from .axisgrid import FacetGrid
class _LinearPlotter(object):
"""Base class for plotting relational data in tidy format.
To get anything useful done you'll have to inherit from this, but setup
code that can be abstracted out should be put here.
"""
def establish_variables(self, data, **kws):
"""Extract variables from data or use directly."""
self.data = data
# Validate the inputs
any_strings = any([isinstance(v, string_types) for v in kws.values()])
if any_strings and data is None:
raise ValueError("Must pass `data` if using named variables.")
# Set the variables
for var, val in kws.items():
if isinstance(val, string_types):
setattr(self, var, data[val])
else:
setattr(self, var, val)
def dropna(self, *vars):
"""Remove observations with missing data."""
vals = [getattr(self, var) for var in vars]
vals = [v for v in vals if v is not None]
not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
for var in vars:
val = getattr(self, var)
if val is not None:
setattr(self, var, val[not_na])
def plot(self, ax):
raise NotImplementedError
class _DiscretePlotter(_LinearPlotter):
"""Plotter for data with discrete independent variable(s).
This will be used by the `barplot` and `pointplot` functions, and
thus indirectly by the `factorplot` function. It can produce plots
where some statistic for a dependent measure is estimated within
subsets of the data, which can be hierarchically structured at up to two
levels (`x` and `hue`). The plots can be drawn with a few different
visual representations of the same underlying data (`bar`, and `point`,
with `box` doing something similar but skipping the estimation).
"""
def __init__(self, x, y=None, hue=None, data=None, units=None,
x_order=None, hue_order=None, color=None, palette=None,
kind="auto", markers=None, linestyles=None, dodge=0,
join=True, hline=None, estimator=np.mean, ci=95,
n_boot=1000, dropna=True):
# This implies we have a single bar/point for each level of `x`
# but that the different levels should be mapped with a palette
self.x_palette = hue is None and palette is not None
# Set class attributes based on inputs
self.estimator = len if y is None else estimator
self.ci = None if y is None else ci
self.join = join
self.n_boot = n_boot
self.hline = hline
# Other attributes that are hardcoded for now
self.bar_widths = .8
self.err_color = "#444444"
self.lw = mpl.rcParams["lines.linewidth"] * 1.8
# Once we've set the above values, if `y` is None we want the actual
# y values to be the x values so we can count them
self.y_count = y is None
if y is None:
y = x
# Ascertain which values will be associated with what values
self.establish_variables(data, x=x, y=y, hue=hue, units=units)
# Figure out the order of the variables on the x axis
x_sorted = np.sort(pd.unique(self.x))
self.x_order = x_sorted if x_order is None else x_order
if self.hue is not None:
hue_sorted = np.sort(pd.unique(self.hue))
self.hue_order = hue_sorted if hue_order is None else hue_order
else:
self.hue_order = [None]
# Handle the other hue-mapped attributes
if markers is None:
self.markers = ["o"] * len(self.hue_order)
else:
if len(markers) != len(self.hue_order):
raise ValueError("Length of marker list must equal "
"number of hue levels")
self.markers = markers
if linestyles is None:
self.linestyles = ["-"] * len(self.hue_order)
else:
if len(linestyles) != len(self.hue_order):
raise ValueError("Length of linestyle list must equal "
"number of hue levels")
self.linestyles = linestyles
# Drop null observations
if dropna:
self.dropna("x", "y", "hue", "units")
# Settle which kind of plot this is going to be
self.establish_plot_kind(kind)
# Determine the color palette
self.establish_palette(color, palette)
# Figure out where the data should be drawn
self.establish_positions(dodge)
def establish_palette(self, color, palette):
"""Set a list of colors for each plot element."""
n_hues = len(self.x_order) if self.x_palette else len(self.hue_order)
hue_names = self.x_order if self.x_palette else self.hue_order
if self.hue is None and not self.x_palette:
if color is None:
color = color_palette()[0]
palette = [color for _ in self.x_order]
elif palette is None:
palette = color_palette(n_colors=n_hues)
elif isinstance(palette, dict):
palette = [palette[k] for k in hue_names]
palette = color_palette(palette, n_hues)
else:
palette = color_palette(palette, n_hues)
self.palette = palette
if self.kind == "point":
self.err_palette = palette
else:
# TODO make this smarter
self.err_palette = [self.err_color] * len(palette)
def establish_positions(self, dodge):
"""Make list of center values for each x and offset for each hue."""
self.positions = np.arange(len(self.x_order))
# If there's no hue variable kind is irrelevant
if self.hue is None:
n_hues = 1
width = self.bar_widths
offset = np.zeros(n_hues)
else:
n_hues = len(self.hue_order)
# Bar offset is set by hardcoded bar width
if self.kind in ["bar", "box"]:
width = self.bar_widths / n_hues
offset = np.linspace(0, self.bar_widths - width, n_hues)
if self.kind == "box":
width *= .95
self.bar_widths = width
# Point offset is set by `dodge` parameter
elif self.kind == "point":
offset = np.linspace(0, dodge, n_hues)
offset -= offset.mean()
self.offset = offset
def establish_plot_kind(self, kind):
"""Use specified kind of apply heuristics to decide automatically."""
if kind == "auto":
y = self.y
# Walk through some heuristics to automatically assign a kind
if self.y_count:
kind = "bar"
elif y.max() <= 1:
kind = "point"
elif (y.mean() / y.std()) < 2.5:
kind = "bar"
else:
kind = "point"
self.kind = kind
elif kind in ["bar", "point", "box"]:
self.kind = kind
else:
raise ValueError("%s is not a valid kind of plot" % kind)
@property
def estimate_data(self):
"""Generator to yield x, y, and ci data for each hue subset."""
# First iterate through the hues, as plots are drawn for all
# positions of a given hue at the same time
for i, hue in enumerate(self.hue_order):
# Build intermediate lists of the values for each drawing
pos = []
height = []
ci = []
for j, x in enumerate(self.x_order):
pos.append(self.positions[j] + self.offset[i])
# Focus on the data for this specific bar/point
current_data = (self.x == x) & (self.hue == hue)
y_data = self.y[current_data]
if self.units is None:
unit_data = None
else:
unit_data = self.units[current_data]
# This is where the main computation happens
height.append(self.estimator(y_data))
if self.ci is not None:
boots = algo.bootstrap(y_data, func=self.estimator,
n_boot=self.n_boot,
units=unit_data)
ci.append(utils.ci(boots, self.ci))
yield pos, height, ci
@property
def binned_data(self):
"""Generator to yield entire subsets of data for each bin."""
# First iterate through the hues, as plots are drawn for all
# positions of a given hue at the same time
for i, hue in enumerate(self.hue_order):
# Build intermediate lists of the values for each drawing
pos = []
data = []
for j, x in enumerate(self.x_order):
pos.append(self.positions[j] + self.offset[i])
current_data = (self.x == x) & (self.hue == hue)
data.append(self.y[current_data])
yield pos, data
def plot(self, ax):
"""Plot based on the stored value for kind of plot."""
plotter = getattr(self, self.kind + "plot")
plotter(ax)
# Set the plot attributes (these are shared across plot kinds)
if self.hue is not None:
leg = ax.legend(loc="best", scatterpoints=1)
if hasattr(self.hue, "name"):
leg.set_title(self.hue.name,
prop={"size": mpl.rcParams["axes.labelsize"]})
ax.xaxis.grid(False)
ax.set_xticks(self.positions)
ax.set_xticklabels(self.x_order)
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if self.y_count:
ax.set_ylabel("count")
else:
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
if self.hline is not None:
ymin, ymax = ax.get_ylim()
if self.hline > ymin and self.hline < ymax:
ax.axhline(self.hline, c="#666666")
def barplot(self, ax):
"""Draw the plot with a bar representation."""
for i, (pos, height, ci) in enumerate(self.estimate_data):
color = self.palette if self.x_palette else self.palette[i]
ecolor = self.err_palette[i]
label = self.hue_order[i]
# The main plot
ax.bar(pos, height, self.bar_widths, color=color,
label=label, align="center")
# The error bars
for x, (low, high) in zip(pos, ci):
ax.plot([x, x], [low, high], linewidth=self.lw, color=ecolor)
# Set the x limits
offset = .5
xlim = self.positions.min() - offset, self.positions.max() + offset
ax.set_xlim(xlim)
def boxplot(self, ax):
"""Draw the plot with a bar representation."""
from .distributions import boxplot
for i, (pos, data) in enumerate(self.binned_data):
color = self.palette if self.x_palette else self.palette[i]
label = self.hue_order[i]
# The main plot
boxplot(data, widths=self.bar_widths, color=color,
positions=pos, label=label, ax=ax)
# Set the x limits
offset = .5
xlim = self.positions.min() - offset, self.positions.max() + offset
ax.set_xlim(xlim)
def pointplot(self, ax):
"""Draw the plot with a point representation."""
for i, (pos, height, ci) in enumerate(self.estimate_data):
color = self.palette if self.x_palette else self.palette[i]
err_palette = self.err_palette
label = self.hue_order[i]
marker = self.markers[i]
linestyle = self.linestyles[i]
# The error bars
for j, (x, (low, high)) in enumerate(zip(pos, ci)):
ecolor = err_palette[j] if self.x_palette else err_palette[i]
ax.plot([x, x], [low, high], linewidth=self.lw, color=ecolor)
# The main plot
ax.scatter(pos, height, s=75, color=color, label=label,
marker=marker)
# The join line
if self.join:
ax.plot(pos, height, color=color,
linewidth=self.lw, linestyle=linestyle)
# Set the x limits
xlim = (self.positions.min() + self.offset.min() - .3,
self.positions.max() + self.offset.max() + .3)
ax.set_xlim(xlim)
class _RegressionPlotter(_LinearPlotter):
"""Plotter for numeric independent variables with regression model.
This does the computations and drawing for the `regplot` function, and
is thus also used indirectly by `lmplot`. It is generally similar to
the `_DiscretePlotter`, but it's intended for use when the independent
variable is numeric (continuous or discrete), and its primary advantage
is that a regression model can be fit to the data and visualized, allowing
extrapolations beyond the observed datapoints.
"""
def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
units=None, order=1, logistic=False, lowess=False,
robust=False, x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
color=None, label=None):
# Set member attributes
self.x_estimator = x_estimator
self.ci = ci
self.x_ci = ci if x_ci == "ci" else x_ci
self.n_boot = n_boot
self.scatter = scatter
self.fit_reg = fit_reg
self.order = order
self.logistic = logistic
self.lowess = lowess
self.robust = robust
self.truncate = truncate
self.x_jitter = x_jitter
self.y_jitter = y_jitter
self.color = color
self.label = label
# Validate the regression options:
if sum((order > 1, logistic, robust, lowess)) > 1:
raise ValueError("Mutually exclusive regression options.")
# Extract the data vals from the arguments or passed dataframe
self.establish_variables(data, x=x, y=y, units=units,
x_partial=x_partial, y_partial=y_partial)
# Drop null observations
if dropna:
self.dropna("x", "y", "units", "x_partial", "y_partial")
# Regress nuisance variables out of the data
if self.x_partial is not None:
self.x = self.regress_out(self.x, self.x_partial)
if self.y_partial is not None:
self.y = self.regress_out(self.y, self.y_partial)
# Possibly bin the predictor variable, which implies a point estimate
if x_bins is not None:
self.x_estimator = np.mean if x_estimator is None else x_estimator
x_discrete, x_bins = self.bin_predictor(x_bins)
self.x_discrete = x_discrete
else:
self.x_discrete = self.x
# Save the range of the x variable for the grid later
self.x_range = self.x.min(), self.x.max()
@property
def scatter_data(self):
"""Data where each observation is a point."""
x_j = self.x_jitter
if x_j is None:
x = self.x
else:
x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
y_j = self.y_jitter
if y_j is None:
y = self.y
else:
y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
return x, y
@property
def estimate_data(self):
"""Data with a point estimate and CI for each discrete x value."""
x, y = self.x_discrete, self.y
vals = sorted(np.unique(x))
points, cis = [], []
for val in vals:
# Get the point estimate of the y variable
_y = y[x == val]
est = self.x_estimator(_y)
points.append(est)
# Compute the confidence interval for this estimate
if self.x_ci is None:
cis.append(None)
else:
units = None
if self.units is not None:
units = self.units[x == val]
boots = algo.bootstrap(_y, func=self.x_estimator,
n_boot=self.n_boot, units=units)
_ci = utils.ci(boots, self.x_ci)
cis.append(_ci)
return vals, points, cis
def fit_regression(self, ax=None, x_range=None, grid=None):
"""Fit the regression model."""
# Create the grid for the regression
if grid is None:
if self.truncate:
x_min, x_max = self.x_range
else:
if ax is None:
x_min, x_max = x_range
else:
x_min, x_max = ax.get_xlim()
grid = np.linspace(x_min, x_max, 100)
ci = self.ci
# Fit the regression
if self.order > 1:
yhat, yhat_boots = self.fit_poly(grid, self.order)
elif self.logistic:
from statsmodels.api import GLM, families
yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
family=families.Binomial())
elif self.lowess:
ci = None
grid, yhat = self.fit_lowess()
elif self.robust:
from statsmodels.api import RLM
yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
else:
yhat, yhat_boots = self.fit_fast(grid)
# Compute the confidence interval at each grid point
if ci is None:
err_bands = None
else:
err_bands = utils.ci(yhat_boots, ci, axis=0)
return grid, yhat, err_bands
def fit_fast(self, grid):
"""Low-level regression and prediction using linear algebra."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: np.linalg.pinv(_x).dot(_y)
yhat = grid.dot(reg_func(X, y))
if self.ci is None:
return yhat, None
beta_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units).T
yhat_boots = grid.dot(beta_boots).T
return yhat, yhat_boots
def fit_poly(self, grid, order):
"""Regression using numpy polyfit for higher-order trends."""
x, y = self.x, self.y
reg_func = lambda _x, _y: np.polyval(np.polyfit(_x, _y, order), grid)
yhat = reg_func(x, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(x, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_statsmodels(self, grid, model, **kwargs):
"""More general regression function using statsmodels objects."""
X, y = np.c_[np.ones(len(self.x)), self.x], self.y
grid = np.c_[np.ones(len(grid)), grid]
reg_func = lambda _x, _y: model(_y, _x, **kwargs).fit().predict(grid)
yhat = reg_func(X, y)
if self.ci is None:
return yhat, None
yhat_boots = algo.bootstrap(X, y, func=reg_func,
n_boot=self.n_boot, units=self.units)
return yhat, yhat_boots
def fit_lowess(self):
"""Fit a locally-weighted regression, which returns its own grid."""
from statsmodels.api import nonparametric
grid, yhat = nonparametric.lowess(self.y, self.x).T
return grid, yhat
def bin_predictor(self, bins):
"""Discretize a predictor by assigning value to closest bin."""
x = self.x
if np.isscalar(bins):
percentiles = np.linspace(0, 100, bins + 2)[1:-1]
bins = np.c_[utils.percentiles(x, percentiles)]
else:
bins = np.c_[np.ravel(bins)]
dist = distance.cdist(np.c_[x], bins)
x_binned = bins[np.argmin(dist, axis=1)].ravel()
return x_binned, bins.ravel()
def regress_out(self, a, b):
"""Regress b from a keeping a's original mean."""
a_mean = a.mean()
a = a - a_mean
b = b - b.mean()
b = np.c_[b]
a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
return (a_prime + a_mean).reshape(a.shape)
def plot(self, ax, scatter_kws, line_kws):
"""Draw the full plot."""
# Insert the plot label into the correct set of keyword arguments
if self.fit_reg:
line_kws["label"] = self.label
else:
scatter_kws["label"] = self.label
# Use the current color cycle state as a default
if self.color is None:
lines, = plt.plot(self.x.mean(), self.y.mean())
color = lines.get_color()
lines.remove()
else:
color = self.color
# Let color in keyword arguments override overall plot color
scatter_kws.setdefault("color", color)
line_kws.setdefault("color", color)
# Draw the constituent plots
if self.scatter:
self.scatterplot(ax, scatter_kws)
if self.fit_reg:
self.lineplot(ax, line_kws)
# Label the axes
if hasattr(self.x, "name"):
ax.set_xlabel(self.x.name)
if hasattr(self.y, "name"):
ax.set_ylabel(self.y.name)
def scatterplot(self, ax, kws):
"""Draw the data."""
if self.x_estimator is None:
kws.setdefault("alpha", .8)
x, y = self.scatter_data
ax.scatter(x, y, **kws)
else:
# TODO abstraction
ci_kws = {"color": kws["color"]}
ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
kws.setdefault("s", 50)
xs, ys, cis = self.estimate_data
if [ci for ci in cis if ci is not None]:
for x, ci in zip(xs, cis):
ax.plot([x, x], ci, **ci_kws)
ax.scatter(xs, ys, **kws)
def lineplot(self, ax, kws):
"""Draw the model."""
xlim = ax.get_xlim()
# Fit the regression model
grid, yhat, err_bands = self.fit_regression(ax)
# Set default aesthetics
fill_color = kws["color"]
lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
kws.setdefault("linewidth", lw)
# Draw the regression line and confidence interval
ax.plot(grid, yhat, **kws)
if err_bands is not None:
ax.fill_between(grid, *err_bands, color=fill_color, alpha=.15)
ax.set_xlim(*xlim)
def lmplot(x, y, data, hue=None, col=None, row=None, palette="husl",
col_wrap=None, size=5, aspect=1, sharex=True, sharey=True,
hue_order=None, col_order=None, row_order=None, dropna=True,
legend=True, legend_out=True, **kwargs):
"""Plot a linear regression model and data onto a FacetGrid.
Parameters
----------
x, y : strings
Column names in ``data``.
data : DataFrame
Long-form (tidy) dataframe with variables in columns and observations
in rows.
hue, col, row : strings, optional
Variable names to facet on the hue, col, or row dimensions (see
:class:`FacetGrid` docs for more information).
palette : seaborn palette or dict, optional
Color palette if using a `hue` facet. Should be something that
seaborn.color_palette can read, or a dictionary mapping values of the
hue variable to matplotlib colors.
col_wrap : int, optional
Wrap the column variable at this width. Incompatible with `row`.
size : scalar, optional
Height (in inches) of each facet.
aspect : scalar, optional
Aspect * size gives the width (in inches) of each facet.
share{x, y}: booleans, optional
Lock the limits of the vertical and horizontal axes across the
facets.
{hue, col, row}_order: sequence of strings
Order to plot the values in the faceting variables in, otherwise
sorts the unique values.
dropna : boolean, optional
Drop missing values from the data before plotting.
legend : boolean, optional
Draw a legend for the data when using a `hue` variable.
legend_out: boolean, optional
Draw the legend outside the grid of plots.
kwargs : key, value pairs
Other keyword arguments are passed to :func:`regplot`
Returns
-------
facets : FacetGrid
Returns the :class:`FacetGrid` instance with the plot on it
for further tweaking.
See Also
--------
regplot : Axes-level function for plotting linear regressions.
"""
# Backwards-compatibility warning layer
if "color" in kwargs:
msg = "`color` is deprecated and will be removed; using `hue` instead."
warnings.warn(msg, UserWarning)
hue = kwargs.pop("color")
# Reduce the dataframe to only needed columns
# Otherwise when dropna is True we could lose data because it is missing
# in a column that isn't relevant to this plot
units = kwargs.get("units", None)
x_partial = kwargs.get("x_partial", None)
y_partial = kwargs.get("y_partial", None)
need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
cols = np.unique([a for a in need_cols if a is not None]).tolist()
data = data[cols]
# Initialize the grid
facets = FacetGrid(data, row, col, hue, palette=palette,
row_order=row_order, col_order=col_order,
hue_order=hue_order, dropna=dropna,
size=size, aspect=aspect, col_wrap=col_wrap,
sharex=sharex, sharey=sharey,
legend=legend, legend_out=legend_out)
# Hack to set the x limits properly, which needs to happen here
# because the extent of the regression estimate is determined
# by the limits of the plot
if sharex:
for ax in facets.axes.flat:
scatter = ax.scatter(data[x], np.ones(len(data)) * data[y].mean())
scatter.remove()
# Draw the regression plot on each facet
facets.map_dataframe(regplot, x, y, **kwargs)
return facets
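# A minimal usage sketch for `lmplot` on a synthetic long-form DataFrame.
# The column names ("x", "y", "g") and the data below are hypothetical and
# only illustrate the call signature documented above.
def _example_lmplot_usage():
    rs = np.random.RandomState(0)
    df = pd.DataFrame({"x": rs.normal(size=80),
                       "g": np.repeat(["a", "b"], 40)})
    df["y"] = 1.5 * df["x"] + (df["g"] == "b") * 2 + rs.normal(size=80)
    # One regression line per hue level, drawn on a single facet
    return lmplot("x", "y", df, hue="g")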
def factorplot(x, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, x_order=None, hue_order=None, col_order=None,
row_order=None, kind="auto", markers=None, linestyles=None,
dodge=0, join=True, hline=None, size=5, aspect=1, palette=None,
legend=True, legend_out=True, dropna=True, sharex=True,
sharey=True, margin_titles=False):
"""Plot a variable estimate and error sorted by categorical factors.
Parameters
----------
x : string
Variable name in `data` for splitting the plot on the x axis.
y : string, optional
Variable name in `data` for the dependent variable. If omitted, the
counts within each bin are plotted (without confidence intervals).
data : DataFrame
Long-form (tidy) dataframe with variables in columns and observations
in rows.
hue : string, optional
Variable name in `data` for splitting the plot by color. In the case
of `kind="bar"`, this also influences the placement on the x axis.
row, col : strings, optional
Variable name(s) in `data` for splitting the plot into a facet grid
along row and columns.
col_wrap : int or None, optional
Wrap the column variable at this width (incompatible with `row`).
estimator : vector -> scalar function, optional
Function to aggregate `y` values at each level of the factors.
ci : int in [0, 100], optional
Size of confidence interval to draw around the aggregated value.
n_boot : int, optional
Number of bootstrap resamples used to compute confidence interval.
units : vector, optional
Vector with ids for sampling units; bootstrap will be performed over
these units and then within them.
kind : {"auto", "point", "bar"}, optional
Visual representation of the plot. "auto" uses a few heuristics to
guess whether "bar" or "point" is more appropriate.
markers : list of strings, optional
Marker codes to map the `hue` variable with. Only relevant when kind
is "point".
linestyles : list of strings, optional
Linestyle codes to map the `hue` variable with. Only relevant when
kind is "point".
dodge : positive scalar, optional
Horizontal offset applied to different `hue` levels. Only relevant
when kind is "point".
join : boolean, optional
Whether points from the same level of `hue` should be joined. Only
relevant when kind is "point".
size : positive scalar, optional
Height (in inches) of each facet.
aspect : positive scalar, optional
Ratio of facet width to facet height.
palette : seaborn color palette, optional
Palette to map `hue` variable with (or `x` variable when `hue` is
None).
legend : boolean, optional
Draw a legend, only if `hue` is used and does not overlap with other
variables.
legend_out : boolean, optional
Draw the legend outside the grid; otherwise it is placed within the
first facet.
dropna : boolean, optional
Remove observations that are NA within any variables used to make
the plot.
share{x, y} : booleans, optional
Lock the limits of the vertical and/or horizontal axes across the
facets.
margin_titles : bool, optional
If True and there is a `row` variable, draw the titles on the right
margin of the grid (experimental).
Returns
-------
facet : FacetGrid
Returns the :class:`FacetGrid` instance with the plot on it
for further tweaking.
See Also
--------
pointplot : Axes-level function for drawing a point plot
barplot : Axes-level function for drawing a bar plot
boxplot : Axes-level function for drawing a box plot
"""
cols = [a for a in [x, y, hue, col, row, units] if a is not None]
cols = pd.unique(cols).tolist()
data = data[cols]
facet_hue = hue if hue in [row, col] else None
facet_palette = palette if hue in [row, col] else None
# Initialize the grid
facets = FacetGrid(data, row, col, facet_hue, palette=facet_palette,
row_order=row_order, col_order=col_order, dropna=dropna,
size=size, aspect=aspect, col_wrap=col_wrap,
legend=legend, legend_out=legend_out,
margin_titles=margin_titles)
if kind == "auto":
if y is None:
kind = "bar"
elif (data[y] <= 1).all():
kind = "point"
elif (data[y].mean() / data[y].std()) < 2.5:
kind = "bar"
else:
kind = "point"
# Draw the plot on each facet
kwargs = dict(estimator=estimator, ci=ci, n_boot=n_boot, units=units,
x_order=x_order, hue_order=hue_order, hline=hline)
# Delegate the hue variable to the plotter not the FacetGrid
if hue is not None and hue in [row, col]:
hue = None
else:
kwargs["palette"] = palette
# Plot by mapping a plot function across the facets
if kind == "bar":
facets.map_dataframe(barplot, x, y, hue, **kwargs)
elif kind == "box":
def _boxplot(x, y, hue, data=None, **kwargs):
p = _DiscretePlotter(x, y, hue, data, kind="box", **kwargs)
ax = plt.gca()
p.plot(ax)
facets.map_dataframe(_boxplot, x, y, hue, **kwargs)
elif kind == "point":
kwargs.update(dict(dodge=dodge, join=join,
markers=markers, linestyles=linestyles))
facets.map_dataframe(pointplot, x, y, hue, **kwargs)
# Draw legends and labels
if y is None:
facets.set_axis_labels(x, "count")
facets.fig.tight_layout()
if legend and (hue is not None) and (hue not in [x, row, col]):
facets.set_legend(title=hue, label_order=hue_order)
return facets
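# A minimal usage sketch for `factorplot` on synthetic data. The column
# names ("group", "score", "condition") are hypothetical.
def _example_factorplot_usage():
    rs = np.random.RandomState(1)
    df = pd.DataFrame({"group": np.tile(["g1", "g2"], 50),
                       "condition": np.repeat(["c1", "c2"], 50),
                       "score": rs.normal(size=100)})
    # Point estimates with bootstrapped CIs, split by color on `condition`
    return factorplot("group", "score", hue="condition", data=df, kind="point")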
def barplot(x, y=None, hue=None, data=None, estimator=np.mean, hline=None,
ci=95, n_boot=1000, units=None, x_order=None, hue_order=None,
dropna=True, color=None, palette=None, label=None, ax=None):
"""Estimate data in categorical bins with a bar representation.
Parameters
----------
x : Vector or string
Data or variable name in `data` for splitting the plot on the x axis.
y : Vector or string, optional
Data or variable name in `data` for the dependent variable. If omitted,
the counts within each bin are plotted (without confidence intervals).
data : DataFrame, optional
Long-form (tidy) dataframe with variables in columns and observations
in rows.
estimator : vector -> scalar function, optional
Function to aggregate `y` values at each level of the factors.
ci : int in [0, 100], optional
Size of confidence interval to draw around the aggregated value.
n_boot : int, optional
Number of bootstrap resamples used to compute confidence interval.
units : vector, optional
Vector with ids for sampling units; bootstrap will be performed over
these units and then within them.
palette : seaborn color palette, optional
Palette to map `hue` variable with (or `x` variable when `hue` is
None).
dropna : boolean, optional
Remove observations that are NA within any variables used to make
the plot.
Returns
-------
ax : Axes
Returns the matplotlib Axes with the plot on it for further tweaking.
See Also
--------
factorplot : Combine barplot and FacetGrid
pointplot : Axes-level function for drawing a point plot
"""
plotter = _DiscretePlotter(x, y, hue, data, units, x_order, hue_order,
color, palette, "bar", None, None, 0, False,
hline, estimator, ci, n_boot, dropna)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
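# A minimal usage sketch for `barplot`. The DataFrame and its column names
# are hypothetical; with `y` given, bars show the mean and a bootstrapped CI.
def _example_barplot_usage():
    rs = np.random.RandomState(2)
    df = pd.DataFrame({"group": np.repeat(["a", "b", "c"], 30),
                       "value": rs.normal(size=90)})
    return barplot("group", "value", data=df, ci=95)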
def pointplot(x, y, hue=None, data=None, estimator=np.mean, hline=None,
ci=95, n_boot=1000, units=None, x_order=None, hue_order=None,
markers=None, linestyles=None, dodge=0, dropna=True, color=None,
palette=None, join=True, label=None, ax=None):
"""Estimate data in categorical bins with a point representation.
Parameters
----------
x : Vector or string
Data or variable name in `data` for splitting the plot on the x axis.
y : Vector or string, optional
Data or variable name in `data` for the dependent variable. If omitted,
the counts within each bin are plotted (without confidence intervals).
data : DataFrame, optional
Long-form (tidy) dataframe with variables in columns and observations
in rows.
estimator : vector -> scalar function, optional
Function to aggregate `y` values at each level of the factors.
ci : int in [0, 100], optional
Size of confidence interval to draw around the aggregated value.
n_boot : int, optional
Number of bootstrap resamples used to compute confidence interval.
units : vector, optional
Vector with ids for sampling units; bootstrap will be performed over
these units and then within them.
markers : list of strings, optional
Marker codes to map the `hue` variable with.
linestyles : list of strings, optional
Linestyle codes to map the `hue` variable with.
dodge : positive scalar, optional
Horizontal offset applied to different `hue` levels.
join : boolean, optional
Whether points from the same level of `hue` should be joined.
palette : seaborn color palette, optional
Palette to map `hue` variable with (or `x` variable when `hue` is
None).
dropna : boolean, optional
Remove observations that are NA within any variables used to make
the plot.
Returns
-------
ax : Axes
Returns the matplotlib Axes with the plot on it for further tweaking.
See Also
--------
factorplot : Combine pointplot and FacetGrid
barplot : Axes-level function for drawing a bar plot
"""
plotter = _DiscretePlotter(x, y, hue, data, units, x_order, hue_order,
color, palette, "point", markers, linestyles,
dodge, join, hline, estimator, ci, n_boot,
dropna)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
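# A minimal usage sketch for `pointplot`, using a hypothetical dose/response
# DataFrame; `dodge` offsets the two hue levels so their CIs do not overlap.
def _example_pointplot_usage():
    rs = np.random.RandomState(3)
    df = pd.DataFrame({"dose": np.tile([1, 2, 3], 40),
                       "drug": np.repeat(["d1", "d2"], 60),
                       "response": rs.normal(size=120)})
    return pointplot("dose", "response", hue="drug", data=df, dodge=.1)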
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci=95,
scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
order=1, logistic=False, lowess=False, robust=False,
x_partial=None, y_partial=None,
truncate=False, dropna=True, x_jitter=None, y_jitter=None,
xlabel=None, ylabel=None, label=None,
color=None, scatter_kws=None, line_kws=None,
ax=None):
"""Draw a scatter plot between x and y with a regression line.
Parameters
----------
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
x_estimator : function that aggregates a vector into one value, optional
When `x` is a discrete variable, apply this estimator to the data
at each value and plot the data as a series of point estimates and
confidence intervals rather than a scatter plot.
x_bins : int or vector, optional
When `x` is a continuous variable, use the values in this vector (or
a vector of evenly spaced values with this length) to discretize the
data by assigning each point to the closest bin value. This applies
only to the plot; the regression is fit to the original data. This
implies that `x_estimator` is numpy.mean if not otherwise provided.
x_ci: int between 0 and 100, optional
Confidence interval to compute and draw around the point estimates
when `x` is treated as a discrete variable.
scatter : boolean, optional
Draw the scatter plot or point estimates with CIs representing the
observed data.
fit_reg : boolean, optional
If False, don't fit a regression; just draw the scatterplot.
ci : int between 0 and 100 or None, optional
Confidence interval to compute for regression estimate, which is drawn
as translucent bands around the regression line.
n_boot : int, optional
Number of bootstrap resamples used to compute the confidence intervals.
units : vector or string
Data or column name in `data` with ids for sampling units, so that the
bootstrap is performed by resampling units and then observations within
units for more accurate confidence intervals when data have repeated
measures.
order : int, optional
Order of the polynomial to fit. Use order > 1 to explore higher-order
trends in the relationship.
logistic : boolean, optional
Fit a logistic regression model. This requires `y` to be dichotomous
with values of either 0 or 1.
lowess : boolean, optional
Plot a lowess model (locally weighted nonparametric regression).
robust : boolean, optional
Fit a robust linear regression, which may be useful when the data
appear to have outliers.
{x, y}_partial : matrix or string(s), optional
Matrix with same first dimension as `x`, or column name(s) in `data`.
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
truncate : boolean, optional
If True, truncate the regression estimate at the minimum and maximum
values of the `x` variable.
dropna : boolean, optional
Remove observations that are NA in at least one of the variables.
{x, y}_jitter : floats, optional
Add uniform random noise from within this range (in data coordinates)
to each datapoint in the x and/or y direction. This can be helpful when
plotting discrete values.
label : string, optional
Label to use for the regression line, or for the scatterplot if not
fitting a regression.
color : matplotlib color, optional
Color to use for all elements of the plot. Can set the scatter and
regression colors separately using the `kws` dictionaries. If not
provided, the current color in the axis cycle is used.
{scatter, line}_kws : dictionaries, optional
Additional keyword arguments passed to scatter() and plot() for drawing
the components of the plot.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
lmplot : Combine regplot and a FacetGrid.
residplot : Calculate and plot the residuals of a linear model.
jointplot (with kind="reg"): Draw a regplot with univariate marginal
distributions.
"""
plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
scatter, fit_reg, ci, n_boot, units,
order, logistic, lowess, robust,
x_partial, y_partial, truncate, dropna,
x_jitter, y_jitter, color, label)
if ax is None:
ax = plt.gca()
scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
line_kws = {} if line_kws is None else copy.copy(line_kws)
plotter.plot(ax, scatter_kws, line_kws)
return ax
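# A minimal usage sketch for `regplot` on a synthetic x/y relationship; the
# data are hypothetical and only illustrate the scatter + fit + CI band.
def _example_regplot_usage():
    rs = np.random.RandomState(4)
    x = rs.normal(size=60)
    y = 2 * x + rs.normal(size=60)
    return regplot(x, y, ci=95)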
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
order=1, robust=False, dropna=True, label=None, color=None,
scatter_kws=None, ax=None):
"""Plot the residuals of a linear regression.
This function will regress y on x (possibly as a robust or polynomial
regression) and then draw a scatterplot of the residuals. You can
optionally fit a lowess smoother to the residual plot, which can
help in determining if there is structure to the residuals.
Parameters
----------
x : vector or string
Data or column name in `data` for the predictor variable.
y : vector or string
Data or column name in `data` for the response variable.
data : DataFrame, optional
DataFrame to use if `x` and `y` are column names.
lowess : boolean, optional
Fit a lowess smoother to the residual scatterplot.
{x, y}_partial : matrix or string(s), optional
Matrix with same first dimension as `x`, or column name(s) in `data`.
These variables are treated as confounding and are removed from
the `x` or `y` variables before plotting.
order : int, optional
Order of the polynomial to fit when calculating the residuals.
robust : boolean, optional
Fit a robust linear regression when calculating the residuals.
dropna : boolean, optional
If True, ignore observations with missing data when fitting and
plotting.
label : string, optional
Label that will be used in any plot legends.
color : matplotlib color, optional
Color to use for all elements of the plot.
scatter_kws : dictionaries, optional
Additional keyword arguments passed to scatter() for drawing.
ax : matplotlib axis, optional
Plot into this axis, otherwise grab the current axis or make a new
one if not existing.
Returns
-------
ax: matplotlib axes
Axes with the regression plot.
See Also
--------
regplot : Plot a simple linear regression model.
jointplot (with kind="resid"): Draw a residplot with univariate
marginal distributions.
"""
plotter = _RegressionPlotter(x, y, data, ci=None,
order=order, robust=robust,
x_partial=x_partial, y_partial=y_partial,
dropna=dropna, color=color, label=label)
if ax is None:
ax = plt.gca()
# Calculate the residual from a linear regression
_, yhat, _ = plotter.fit_regression(grid=plotter.x)
plotter.y = plotter.y - yhat
# Set the regression option on the plotter
if lowess:
plotter.lowess = True
else:
plotter.fit_reg = False
# Plot a horizontal line at 0
ax.axhline(0, ls=":", c=".2")
# Draw the scatterplot
scatter_kws = {} if scatter_kws is None else scatter_kws
plotter.plot(ax, scatter_kws, {})
return ax
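# A minimal usage sketch for `residplot`: residuals of a simple linear fit
# on hypothetical data; structure in the scatter would suggest a poor model.
def _example_residplot_usage():
    rs = np.random.RandomState(5)
    x = rs.normal(size=60)
    y = 1.5 * x + rs.normal(size=60)
    return residplot(x, y)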
def coefplot(formula, data, groupby=None, intercept=False, ci=95,
palette="husl"):
"""Plot the coefficients from a linear model.
Parameters
----------
formula : string
patsy formula for ols model
data : dataframe
data for the plot; formula terms must appear in columns
groupby : grouping object, optional
object to group data with to fit conditional models
intercept : bool, optional
if False, strips the intercept term before plotting
ci : float, optional
size of confidence intervals
palette : seaborn color palette, optional
palette for the horizontal plots
"""
if not _has_statsmodels:
raise ImportError("The `coefplot` function requires statsmodels")
alpha = 1 - ci / 100
if groupby is None:
coefs = sf.ols(formula, data).fit().params
cis = sf.ols(formula, data).fit().conf_int(alpha)
else:
grouped = data.groupby(groupby)
coefs = grouped.apply(lambda d: sf.ols(formula, d).fit().params).T
cis = grouped.apply(lambda d: sf.ols(formula, d).fit().conf_int(alpha))
# Possibly ignore the intercept
if not intercept:
coefs = coefs.iloc[1:]
n_terms = len(coefs)
# Plot separately depending on groupby
w, h = mpl.rcParams["figure.figsize"]
hsize = lambda n: n * (h / 2)
wsize = lambda n: n * (w / (4 * (n / 5)))
if groupby is None:
colors = itertools.cycle(color_palette(palette, n_terms))
f, ax = plt.subplots(1, 1, figsize=(wsize(n_terms), hsize(1)))
for i, term in enumerate(coefs.index):
color = next(colors)
low, high = cis.loc[term]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.loc[term], "o", c=color, ms=8)
ax.set_xlim(-.5, n_terms - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_xticks(range(n_terms))
ax.set_xticklabels(coefs.index)
else:
n_groups = len(coefs.columns)
f, axes = plt.subplots(n_terms, 1, sharex=True,
figsize=(wsize(n_groups), hsize(n_terms)))
if n_terms == 1:
axes = [axes]
colors = itertools.cycle(color_palette(palette, n_groups))
for ax, term in zip(axes, coefs.index):
for i, group in enumerate(coefs.columns):
color = next(colors)
low, high = cis.loc[(group, term)]
ax.plot([i, i], [low, high], c=color,
solid_capstyle="round", lw=2.5)
ax.plot(i, coefs.loc[term, group], "o", c=color, ms=8)
ax.set_xlim(-.5, n_groups - .5)
ax.axhline(0, ls="--", c="dimgray")
ax.set_title(term)
ax.set_xlabel(groupby)
ax.set_xticks(range(n_groups))
ax.set_xticklabels(coefs.columns)
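# A minimal usage sketch for `coefplot`; requires statsmodels/patsy. The
# formula and the column names ("y", "x1", "x2") are hypothetical.
def _example_coefplot_usage():
    rs = np.random.RandomState(6)
    df = pd.DataFrame(rs.normal(size=(100, 2)), columns=["x1", "x2"])
    df["y"] = df["x1"] - 2 * df["x2"] + rs.normal(size=100)
    coefplot("y ~ x1 + x2", df)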
def interactplot(x1, x2, y, data=None, filled=False, cmap="RdBu_r",
colorbar=True, levels=30, logistic=False,
contour_kws=None, scatter_kws=None, ax=None, **kwargs):
"""Visualize a continuous two-way interaction with a contour plot.
Parameters
----------
x1, x2, y : strings or array-like
Either the two independent variables and the dependent variable,
or keys to extract them from `data`
data : DataFrame
Pandas DataFrame with the data in the columns.
filled : bool
Whether to plot with filled or unfilled contours
cmap : matplotlib colormap
Colormap to represent yhat in the contour plot.
colorbar : bool
Whether to draw the colorbar for interpreting the color values.
levels : int or sequence
Number or position of contour plot levels.
logistic : bool
Fit a logistic regression model instead of linear regression.
contour_kws : dictionary
Keyword arguments for contour[f]().
scatter_kws : dictionary
Keyword arguments for plot().
ax : matplotlib axis
Axis to draw plot in.
Returns
-------
ax : Matplotlib axis
Axis with the contour plot.
"""
if not _has_statsmodels:
raise ImportError("The `interactplot` function requires statsmodels")
# Handle the form of the data
if data is not None:
x1 = data[x1]
x2 = data[x2]
y = data[y]
if hasattr(x1, "name"):
xlabel = x1.name
else:
xlabel = None
if hasattr(x2, "name"):
ylabel = x2.name
else:
ylabel = None
if hasattr(y, "name"):
clabel = y.name
else:
clabel = None
x1 = np.asarray(x1)
x2 = np.asarray(x2)
y = np.asarray(y)
# Initialize the scatter keyword dictionary
if scatter_kws is None:
scatter_kws = {}
if not ("color" in scatter_kws or "c" in scatter_kws):
scatter_kws["color"] = "#222222"
if not "alpha" in scatter_kws:
scatter_kws["alpha"] = 0.75
# Initialize the contour keyword dictionary
if contour_kws is None:
contour_kws = {}
# Initialize the axis
if ax is None:
ax = plt.gca()
# Plot once to let matplotlib sort out the axis limits
ax.plot(x1, x2, "o", **scatter_kws)
# Find the plot limits
x1min, x1max = ax.get_xlim()
x2min, x2max = ax.get_ylim()
# Make the grid for the contour plot
x1_points = np.linspace(x1min, x1max, 100)
x2_points = np.linspace(x2min, x2max, 100)
xx1, xx2 = np.meshgrid(x1_points, x2_points)
# Fit the model with an interaction
X = np.c_[np.ones(x1.size), x1, x2, x1 * x2]
if logistic:
lm = sm.GLM(y, X, family=sm.families.Binomial()).fit()
else:
lm = sm.OLS(y, X).fit()
# Evaluate the model on the grid
predict = np.vectorize(lambda x1_, x2_: lm.predict([1, x1_, x2_, x1_ * x2_]))
yhat = predict(xx1, xx2)
# Default color limits put the midpoint at mean(y)
y_bar = y.mean()
c_min = min(np.percentile(y, 2), yhat.min())
c_max = max(np.percentile(y, 98), yhat.max())
delta = max(c_max - y_bar, y_bar - c_min)
c_min, c_max = y_bar - delta, y_bar + delta
contour_kws.setdefault("vmin", c_min)
contour_kws.setdefault("vmax", c_max)
# Draw the contour plot
func_name = "contourf" if filled else "contour"
contour = getattr(ax, func_name)
c = contour(xx1, xx2, yhat, levels, cmap=cmap, **contour_kws)
# Draw the scatter again so it's visible
ax.plot(x1, x2, "o", **scatter_kws)
# Draw a colorbar, maybe
if colorbar:
bar = plt.colorbar(c)
# Label the axes
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if clabel is not None and colorbar:
clabel = "P(%s)" % clabel if logistic else clabel
bar.set_label(clabel, labelpad=15, rotation=270)
return ax
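# A minimal usage sketch for `interactplot`; requires statsmodels. The data
# simulate a hypothetical interaction between two continuous predictors.
def _example_interactplot_usage():
    rs = np.random.RandomState(7)
    df = pd.DataFrame(rs.uniform(-1, 1, size=(200, 2)), columns=["x1", "x2"])
    df["y"] = df["x1"] * df["x2"] + rs.normal(scale=.3, size=200)
    return interactplot("x1", "x2", "y", data=df, filled=True)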
def corrplot(data, names=None, annot=True, sig_stars=True, sig_tail="both",
sig_corr=True, cmap=None, cmap_range=None, cbar=True,
diag_names=True, ax=None, **kwargs):
"""Plot a correlation matrix with colormap and r values.
Parameters
----------
data : Dataframe or nobs x nvars array
Rectangular input data with variables in the columns.
names : sequence of strings
Names to associate with variables if `data` is not a DataFrame.
annot : bool
Whether to annotate the upper triangle with correlation coefficients.
sig_stars : bool
If True, get significance with permutation test and denote with stars.
sig_tail : both | upper | lower
Direction for significance test. Also controls the default colorbar.
sig_corr : bool
If True, use FWE-corrected p values for the sig stars.
cmap : colormap
Colormap name as string or colormap object.
cmap_range : None, "full", (low, high)
Either truncate colormap at (-max(abs(r)), max(abs(r))), use the
full range (-1, 1), or specify (min, max) values for the colormap.
cbar : bool
If true, plot the colorbar legend.
ax : matplotlib axis
Axis to draw plot in.
kwargs : other keyword arguments
Passed to ax.matshow()
Returns
-------
ax : matplotlib axis
Axis object with plot.
"""
if not isinstance(data, pd.DataFrame):
if names is None:
names = ["var_%d" % i for i in range(data.shape[1])]
data = | pd.DataFrame(data, columns=names, dtype=np.float) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
def read_data(random_state=42,
otu_filename='../../Datasets/otu_table_all_80.csv',
metadata_filename='../../Datasets/metadata_table_all_80.csv'):
otu = | pd.read_csv(otu_filename, index_col=0, header=None, sep='\t') | pandas.read_csv |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = | DataFrame(index=[0, 1], columns=[0]) | pandas.DataFrame |
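# A minimal standalone sketch (not part of the test suite above) of the
# duplicate-label .loc behaviour those tests exercise: selecting labels that
# exist returns every matching row, while any missing label raises KeyError.
import pandas as pd

df_dup = pd.DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
print(df_dup.loc[["A", "B"]])          # both "A" rows, then "B"
try:
    df_dup.loc[["A", "E"]]             # "E" is not in the index
except KeyError as err:
    print("KeyError:", err)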
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy.io.votable import parse
from sh import bzip2
from ...lib.context_managers import cd
# =============================================================================
# CONSTANTS
# =============================================================================
PATH = os.path.abspath(os.path.dirname(__file__))
CATALOG_PATH = os.path.join(PATH, "carpyncho_catalog.pkl")
# =============================================================================
# BUILD
# =============================================================================
def get_ogle_3_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogleIII_all.csv.bz2")
df = pd.read_csv("ogleIII_all.csv")
ra = df["RA"].apply(
lambda d: d.replace(":", "h", 1).replace(":", "m", 1) + "s")
dec = df["Decl"].apply(
lambda d: d.replace(":", "d", 1).replace(":", "m", 1) + "s")
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = pd.Series(coords.dec.deg, index=df.index)
df["cls"] = df["Type"] + "-" + df["Subtype"]
df = df[["ID", "ra", "dec", "cls"]]
df["catalog"] = pd.Series("OGLE-3", index=df.index)
os.remove("ogleIII_all.csv")
return df
def get_ogle_4_resume():
with cd(PATH):
bzip2("-f", "-dk", "ogle4.csv.bz2")
df = pd.read_csv("ogle4.csv")
def _ra(d):
d = d.replace(":", "h", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
ra = df["ra"].apply(_ra)
def _dec(d):
d = d.replace(":", "d", 1).replace(":", "m", 1)
return d.replace(":", ".") + "s"
dec = df["dec"].apply(_dec)
coords = SkyCoord(ra, dec, frame='icrs')
df['ra'] = pd.Series(coords.ra.deg, index=df.index)
df['dec'] = pd.Series(coords.dec.deg, index=df.index)
df["ID"] = df["id"]
df = df[["ID", "ra", "dec", "cls"]]
df["catalog"] = | pd.Series("OGLE-4", index=df.index) | pandas.Series |
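# Illustrative sketch of the coordinate-conversion pattern used above; the
# input strings are made-up examples, not catalog values.
import pandas as pd
from astropy.coordinates import SkyCoord

cat = pd.DataFrame({"ra_str": ["17h51m03.0s"], "dec_str": ["-29d53m18.0s"]})
coords = SkyCoord(cat["ra_str"], cat["dec_str"], frame='icrs')
cat['ra'] = pd.Series(coords.ra.deg, index=cat.index)
cat['dec'] = pd.Series(coords.dec.deg, index=cat.index)
cat["catalog"] = pd.Series("EXAMPLE", index=cat.index)
print(cat)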
# -*- coding: utf-8 -*-
"""
Joining files script, to be used prior to uploading the data on DataBricks
"""
import pandas as pd
import glob
# Names of the sensors whose files need to be joined
sensors = ["well_wh_p_", "well_dh_p_", "well_wh_t_", "well_dh_t", "well_wh_choke_"]
# loop through each sensor and find all files with sensor as name
for csvfile in sensors:
path = r'C:/Users/well_data/' #change path accordingly
all_files = glob.glob(path + "/" + csvfile + "*" )
print(len(all_files)) # Print total number of files present in repository
li = [] # Create list to store .csv data during joining
# get data out of each .csv file
for filename in all_files:
df = pd.read_csv(filename, index_col=None, header=0)
li.append(df)
print("All files read for sensor", csvfile)
# concatenate the files together
frame = | pd.concat(li, axis=0, ignore_index=True, sort=False) | pandas.concat |
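# Minimal sketch of the same join pattern for a single sensor prefix; the
# path and prefix below are placeholders, not the script's real values.
import glob
import pandas as pd

parts = [pd.read_csv(f, index_col=None, header=0)
         for f in glob.glob("well_data/well_wh_p_*.csv")]
if parts:
    combined = pd.concat(parts, axis=0, ignore_index=True, sort=False)
    combined.to_csv("well_data/well_wh_p_joined.csv", index=False)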
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 09:34:08 2017
@author: <NAME>
Answer query script: This script contains functions to query and manipulate DLR survey answer sets. It references datasets that must be stored in a /data/tables subdirectory in the parent directory.
"""
import numpy as np
import pandas as pd
import os
from glob import glob
import json
import feather
from support import feature_dir, fdata_dir, InputError, writeLog, validYears, table_dir
def loadTable(name, query=None, columns=None):
"""
This function loads the named feather table from the table directory into the workspace, optionally restricted to the given columns.
"""
dir_path = os.path.join(table_dir, 'feather')
file = os.path.join(dir_path, name +'.feather')
d = feather.read_dataframe(file)
if columns is None:
table = d
else:
table = d[columns]
try:
return table
except UnboundLocalError:
return('Could not find table with name '+name)
def loadID():
"""
This function returns all AnswerIDs and ProfileIDs linked to survey groups, together with group metadata such as Year and GroupID.
"""
groups = loadTable('groups')
links = loadTable('links')
profiles = loadTable('profiles')
# a_id = links[(links.GroupID != 0) & (links['AnswerID'] != 0)].drop(columns=['ConsumerID','lock','ProfileID'])
p_id = links[(links.GroupID != 0) & (links['ProfileID'] != 0)].drop(labels=['ConsumerID','lock','AnswerID'], axis=1)
profile_meta = profiles.merge(p_id, how='left', left_on='ProfileId', right_on='ProfileID').drop(labels=['ProfileId','lock'], axis=1)
ap = links[links.GroupID==0].drop(labels=['ConsumerID','lock','GroupID'], axis=1)
x = profile_meta.merge(ap, how='outer', on = 'ProfileID')
join = x.merge(groups, on='GroupID', how='left')
#Wrangling data into right format
all_ids = join[join['Survey'] != 'Namibia'] # remove Namibia
all_ids = all_ids.dropna(subset=['GroupID','Year'])
all_ids.Year = all_ids.Year.astype(int)
all_ids.GroupID = all_ids.GroupID.astype(int)
all_ids.AnswerID.fillna(0, inplace=True)
all_ids.AnswerID = all_ids.AnswerID.astype(int)
all_ids.ProfileID = all_ids.ProfileID.astype(int)
return all_ids
def idsDuplicates():
ids = loadID()
i = ids[(ids.duplicated('AnswerID')==True)&(ids['AnswerID']!=0)]
ip = i.pivot_table(index='Year',columns='AnswerID',values='ProfileID',aggfunc='count')
return ip.T.describe()
def matchAIDToPID(year, pp):
#TODO still needs checking --- think about integrating with socios.loadID -> all PIDs and the 0 where there is no corresponding AID
a_id = loadID(year, id_name = 'AnswerID')['id']
# p_id = socios.loadID(year, id_name = 'ProfileID')['id']
#get dataframe of linkages between AnswerIDs and ProfileIDs
links = loadTable('links')
# year_links = links[links.ProfileID.isin(p_id)]
year_links = links[links.AnswerID.isin(a_id)]
year_links = year_links.loc[year_links.ProfileID != 0, ['AnswerID','ProfileID']]
#get profile metadata (recorder ID, recording channel, recorder type, units of measurement)
profiles = loadTable('profiles')
#add AnswerID information to profiles metadata
profile_meta = year_links.merge(profiles, left_on='ProfileID', right_on='ProfileId').drop('ProfileId', axis=1)
VI_profile_meta = profile_meta.loc[(profile_meta['Unit of measurement'] == 2), :] #select current profiles only
#THIS IS NB!!
output = pp.merge(VI_profile_meta.loc[:,['AnswerID','ProfileID']], left_on='ProfileID_i', right_on='ProfileID').drop(['ProfileID','Valid_i','Valid_v'], axis=1)
output = output[output.columns.sort_values()]
output.fillna({'valid_calculated':0}, inplace=True)
return output
def loadQuestions(dtype = None):
"""
This function gets all questions.
"""
qu = loadTable('questions').drop(labels='lock', axis=1)
qu.Datatype = qu.Datatype.astype('category')
qu.Datatype.cat.categories = ['blob','char','num']
qu['ColumnAlias'] = [x.strip() for x in qu['ColumnAlias']]
if dtype is None:
pass
else:
qu = qu[qu.Datatype == dtype]
return qu
def loadAnswers():
"""
This function returns a dict of answer tables keyed by data type ('blob', 'char', 'num'), each containing AnswerIDs, QuestionaireIDs and the corresponding question responses.
"""
answer_meta = loadTable('answers', columns=['AnswerID', 'QuestionaireID'])
blob = loadTable('answers_blob_anonymised').drop(labels='lock', axis=1)
blob = blob.merge(answer_meta, how='left', on='AnswerID')
blob.fillna(np.nan, inplace = True)
char = loadTable('answers_char_anonymised').drop(labels='lock', axis=1)
char = char.merge(answer_meta, how='left', on='AnswerID')
char.fillna(np.nan, inplace = True)
num = loadTable('answers_number_anonymised').drop(labels='lock', axis=1)
num = num.merge(answer_meta, how='left', on='AnswerID')
num.fillna(np.nan, inplace = True)
return {'blob':blob, 'char':char, 'num':num}
def searchQuestions(search = None):
"""
Searches the questions table for a search term and returns the matching questions with their data type, questionaire ID and column number.
The search term is specified as a single string; if None, all questions are returned.
"""
questions = loadTable('questions').drop(labels='lock', axis=1)
questions.Datatype = questions.Datatype.astype('category')
questions.Datatype.cat.categories = ['blob','char','num']
if search is None:
searchterm = ''
else:
searchterm = search.replace(' ', '+')
trantab = str.maketrans({'(':'', ')':'', ' ':'', '/':''})
result = questions.loc[questions.Question.str.translate(trantab).str.contains(searchterm, case=False), ['Question', 'Datatype','QuestionaireID', 'ColumnNo']]
return result
def searchAnswers(search):
"""
This function returns the answer IDs and responses for a list of search terms
"""
answers = loadAnswers()
questions = searchQuestions(search) #get column numbers for query
result = pd.DataFrame(columns=['AnswerID','QuestionaireID'])
for dt in questions.Datatype.unique():
ans = answers[dt]
for i in questions.QuestionaireID.unique():
select = questions.loc[(questions.Datatype == dt)&(questions.QuestionaireID==i)]
fetchcolumns=['AnswerID'] + ['QuestionaireID'] + list(select.ColumnNo.astype(str))
newcolumns = ['AnswerID'] + ['QuestionaireID'] + list(select.Question.astype(str).str.lower())
df = ans.loc[ans['QuestionaireID']==i,fetchcolumns]
df.columns = newcolumns
result = result.merge(df, how='outer')
return result
def extractSocios(searchlist, year=None, col_names=None, geo=None):
"""
This function creates a dataframe containing the data for a set of selected features for a given year.
questionaire options: 6 - pre 1999, 3 - 2000 onwards
'geo' adds location data and can be one of Municipality, District, Province or None
"""
if isinstance(searchlist, list):
pass
else:
searchlist = [searchlist]
if col_names is None:
search = dict(zip(searchlist, searchlist))
else:
search = dict(zip(searchlist, col_names))
#filter AnswerIDs by year
ids = loadID()
if year is None:
sub_ids = ids[ids.AnswerID!=0]
else:
sub_ids = ids[(ids.AnswerID!=0)&(ids.Year==year)]
sub_ids = sub_ids.drop_duplicates(subset='AnswerID')
#generate feature frame
result = pd.DataFrame(columns=['AnswerID','QuestionaireID'])
for s in search.keys():
d = searchAnswers(s)
ans = d[(d.AnswerID.isin(sub_ids.AnswerID)) & (d.QuestionaireID < 10)] # remove non-domestic results
ans = ans.dropna(axis=1, how='all')
#set feature frame column names
if len(ans.columns[2:])==1:
ans.columns = ['AnswerID','QuestionaireID'] + [search.get(s)]
try:
result = result.merge(ans, how='outer')
except Exception:
pass
if geo is None:
result = result.merge(sub_ids[['AnswerID', 'ProfileID']], how='left')
else:
result = result.merge(sub_ids[['AnswerID', 'ProfileID', geo]], how='left')
return result
def generateSociosSetSingle(year, spec_file, set_id='ProfileID'):
"""
This function generates a json formatted evidence text file compatible with the syntax for providing evidence to the python library libpgm for the specified year. The function requires a json formatted text file with feature specifications as input.
"""
#Get feature specifications
files = glob(os.path.join(feature_dir, 'specification', spec_file + '*.txt'))
for file_path in files:
try:
with open(file_path, 'r') as f:
featurespec = json.load(f)
year_range = featurespec['year_range']
except:
raise InputError(year, 'Problem reading the spec file.')
if year >= int(year_range[0]) and year <= int(year_range[1]):
validYears(year) #check if year input is valid
break
else:
continue
searchlist = featurespec['searchlist']
features = featurespec['features']
transform = featurespec['transform']
bins = featurespec['bins']
labels = featurespec['labels']
cut = featurespec['cut']
replace = featurespec['replace']
if len(featurespec['geo'])==0:
geo = None
else:
geo = featurespec['geo']
#Get data and questions from socio-demographic survey responses
data = extractSocios(searchlist, year, col_names=searchlist, geo=geo)
missing_cols = list(set(searchlist) - set(data.columns))
data = data.append( | pd.DataFrame(columns=missing_cols) | pandas.DataFrame |
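# Standalone sketch of the "add any missing spec columns" step above; the
# column names are placeholders. Appending an empty DataFrame only creates
# the columns, and reindex(columns=...) achieves the same result on pandas
# versions where DataFrame.append has been removed.
import pandas as pd

data = pd.DataFrame({"watersource": ["tap"], "roofmaterial": ["tile"]})
searchlist = ["watersource", "roofmaterial", "wallmaterial"]
missing_cols = list(set(searchlist) - set(data.columns))
data = data.reindex(columns=list(data.columns) + missing_cols)
print(data.columns.tolist())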
# Created by <NAME>
import numpy as np
import pandas as pd
from hics.scored_slices import ScoredSlices
class AbstractResultStorage:
def update_relevancies(self, new_relevancies: pd.DataFrame):
raise NotImplementedError()
def update_redundancies(self, new_redundancies: pd.DataFrame):
raise NotImplementedError()
def update_bivariate_redundancies(self, new_redundancies: pd.DataFrame, new_weights: pd.DataFrame):
raise NotImplementedError()
def update_slices(self, new_slices: dict):
raise NotImplementedError()
def get_bivariate_redundancies(self):
raise NotImplementedError()
def get_redundancies(self):
raise NotImplementedError()
def get_relevancies(self):
raise NotImplementedError()
def get_slices(self):
raise NotImplementedError()
class DefaultResultStorage(AbstractResultStorage):
def __init__(self, features: list):
self.relevancies = pd.DataFrame(columns=['relevancy', 'iteration'])
self.redundancies = | pd.DataFrame(columns=['redundancy', 'iteration']) | pandas.DataFrame |
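# Hypothetical usage sketch for a storage object like the one above: start
# from an empty frame with fixed columns and grow it as iteration results
# arrive (feature names and scores are made up).
import pandas as pd

relevancies = pd.DataFrame(columns=['relevancy', 'iteration'])
new_rows = pd.DataFrame({'relevancy': [0.42, 0.17], 'iteration': [1, 1]},
                        index=['feature_a', 'feature_b'])
relevancies = pd.concat([relevancies, new_rows])
print(relevancies)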
import torch
import numpy as np
import pandas as pd
import time
import h5py
from tensorboardX import SummaryWriter
class DeepLogger(object):
def __init__(self, time_to_track, what_to_track, log_fname=None,
network_fname=None, seed_id=0, tboard_fname=None,
time_to_print=None, what_to_print=[], save_all_ckpth=False,
print_every_update=None):
"""
Logging object for Deep RL experiments - Parameters to specify:
- Where to log agent (.ckpth) & training stats (.hdf5) to
- Random seed & folderpath for tensorboard
- Time index & statistics to print & Verbosity level of logger
- Whether to save all or only most recent checkpoint of network
"""
self.current_optim_step = 0
self.log_save_counter = 0
self.log_update_counter = 0
self.seed_id = seed_id
self.print_every_update = print_every_update if print_every_update is not None else 1
self.start_time = time.time()
# Set where to log to (Stats - .hdf5, Network - .ckpth)
if isinstance(log_fname, str): self.log_save_fname = log_fname
else: self.log_save_fname = None
if isinstance(network_fname, str): self.network_save_fname = network_fname
else: self.network_save_fname = None
# Boolean and fname list for storing all weights during training
self.save_all_ckpth = save_all_ckpth
if self.save_all_ckpth:
self.ckpth_w_list = []
# Initialize tensorboard logger/summary writer
if isinstance(tboard_fname, str):
self.writer = SummaryWriter(tboard_fname + "_seed_" + str(self.seed_id))
else:
self.writer = None
# Initialize pd dataframes to store logging stats/times
self.time_to_track = time_to_track + ["time_elapsed"]
self.what_to_track = what_to_track
self.clock_to_track = pd.DataFrame(columns=self.time_to_track)
self.stats_to_track = | pd.DataFrame(columns=self.what_to_track) | pandas.DataFrame |
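# Minimal sketch (not the real DeepLogger API) of the row-per-update logging
# pattern used above; the tracked names and values are placeholders.
import time
import pandas as pd

time_to_track = ["num_episodes", "num_env_steps", "time_elapsed"]
what_to_track = ["mean_return", "loss"]
clock_to_track = pd.DataFrame(columns=time_to_track)
stats_to_track = pd.DataFrame(columns=what_to_track)

start_time = time.time()
clock_row = pd.DataFrame([[10, 2500, time.time() - start_time]], columns=time_to_track)
stats_row = pd.DataFrame([[1.3, 0.05]], columns=what_to_track)
clock_to_track = pd.concat([clock_to_track, clock_row], ignore_index=True)
stats_to_track = pd.concat([stats_to_track, stats_row], ignore_index=True)
print(clock_to_track, stats_to_track, sep="\n")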
import sys
import argparse
from functools import reduce
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import Ridge, LinearRegression
import torch
import torch.nn as nn
from zamlexplain.data import load_data
from model import RealNVP
def mean_sd(df_x, df_gen):
df_x = df_x.iloc[:,:17]
df_gen = df_gen.iloc[:,:17]
mean_x = df_x.mean()
mean_gen = df_gen.mean()
mean_err = 100*(mean_gen - mean_x)/mean_x
df_mean = pd.DataFrame(OrderedDict({
'data mean': mean_x,
'synth mean': mean_gen,
'err %': mean_err})).round({'data mean': 2, 'synth mean': 2, 'err %': 0})
std_x = df_x.std()
std_gen = df_gen.std()
std_err = 100*(std_gen - std_x)/std_x
df_std = pd.DataFrame(OrderedDict({
'data std': std_x,
'synth std': std_gen,
'err %': std_err})).round({'data std': 2, 'synth std': 2, 'err %': 0})
return df_mean, df_std
def fix_df(x, scaler, return_numpy=False):
x = scaler.inverse_transform(x.copy())
for cat_idx in scaler.cat_cols:
if len(cat_idx) == 1:
x[:, cat_idx] = (x[:,cat_idx] > 0.5).astype(np.float32)
else:
new_ohe = np.zeros((x.shape[0], len(cat_idx)), dtype=np.float32)
new_ohe[np.arange(x.shape[0]), np.argmax(x[:, cat_idx], axis=1)] = 1.0
x[:, cat_idx] = new_ohe
# delinq_2yrs, inq, mths, mths, open
for i in [5, 6, 7, 8, 9, 10, 12, 16]:
x[x[:,i] < 0, i] = 0.0
x[:, i] = np.round(x[:, i])
if return_numpy:
return x
else:
return | pd.DataFrame(x, columns=scaler.columns) | pandas.DataFrame |
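# Standalone sketch of the one-hot "snap to argmax" clean-up used in fix_df
# above; the array values and column indices are made up for illustration.
import numpy as np

x = np.array([[0.2, 0.7, 0.4],
              [0.9, 0.1, 0.3]], dtype=np.float32)
cat_idx = [0, 1, 2]                       # columns forming one one-hot group
new_ohe = np.zeros((x.shape[0], len(cat_idx)), dtype=np.float32)
new_ohe[np.arange(x.shape[0]), np.argmax(x[:, cat_idx], axis=1)] = 1.0
x[:, cat_idx] = new_ohe
print(x)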
import re
import os
import pandas as pd
import numpy as np
from .extract_tools import default_tokenizer as _default_tokenizer
def _getDictionnaryKeys(dictionnary):
"""
Function that gets the keys from a dict object and flattens nested sub-dicts.
"""
keys_array = []
for key in dictionnary.keys():
keys_array.append(key)
if (type(dictionnary[key]) == type({})):
keys_array = keys_array+_getDictionnaryKeys(dictionnary[key])
return(keys_array)
class pandasToBrat:
"""
Class for Pandas brat folder management.
For each brat folder, there is an instance of pandasToBrat.
It supports importing and exporting configurations for relations and entities,
documents, and annotations.
Inputs :
folder, str : path of brat folder
"""
def __init__(self, folder):
self.folder = folder
self.conf_file = 'annotation.conf'
self.emptyDFCols = {
"annotations":["id","type_id", "word", "label", "start", "end"],
"relations":["id","type_id","relation","Arg1","Arg2"]
}
# Adding '/' to folder path if missing
if(self.folder[-1] != '/'):
self.folder += '/'
# Creating folder if do not exist
if (os.path.isdir(self.folder)) == False:
os.mkdir(self.folder)
# Loading conf file if exists | creating empty conf file if not
self.read_conf()
def _emptyData(self):
fileList = self._getFileList()
nb_files = fileList.shape[0]
confirmation = input("Deleting all data ({} files), press y to confirm :".format(nb_files))
if confirmation == 'y':
fileList["filename"].apply(lambda x: os.remove(self.folder+x))
print("{} files deleted.".format(nb_files))
def _generateEntitiesStr (self, conf, data = '', level = 0):
if (type(conf) != type({})):
return data
# Parsing keys
for key in conf.keys():
value = conf[key]
if value == True:
data += '\n'+level*'\t'+key
elif value == False:
data += '\n'+level*'\t'+'!'+key
elif type(value) == type({}):
data += '\n'+level*'\t'+key
data = self._generateEntitiesStr(value, data, level+1)
return data
def _writeEntitiesLevel (self, conf, data, last_n = -1):
for n in range(last_n,len(conf)):
# If empty : pass, if not the last line : pass
if (conf[n] != '' and n > last_n):
level = len(conf[n].split("\t"))-1
if (n+1 <= len(conf)): # Level of next item
next_level = len(conf[n+1].split("\t"))-1
else:
next_level = level
splitted_str = conf[n].split("\t")
str_clean = splitted_str[len(splitted_str)-1]
if (level >= next_level): # Write the lines that share the current level
if (str_clean[0] == '!'):
data[str_clean[1:]] = False
else:
data[str_clean] = True
if (level > next_level):
# Break out of the loop
break
elif (level < next_level): # Write the lower levels recursively
splitted_str = conf[n].split("\t")
last_n, data[str_clean] = self._writeEntitiesLevel(conf, {}, n)
return(n, data)
def _readRelations(self, relations, entities = []):
data = {}
for relation in relations.split("\n"):
if relation != '':
relation_data = relation.split("\t")[0]
args = list(map(lambda x: x.split(":")[1], relation.split("\t")[1].split(", ")))
args_valid = list(filter(lambda x: x in entities, args))
if (len(args_valid) > 0):
data[relation_data] = {"args":args_valid}
return data
def _writeRelations(self, relations, entities = []):
data = ''
for relation in relations:
args_array = list(filter(lambda x: x in entities, relations[relation]["args"]))
if (len(args_array) > 0):
data += '\n'+relation+'\t'
for n in range(0, len(args_array)):
data += int(bool(n))*', '+'Arg'+str(n+1)+':'+args_array[n]
return data
def read_conf (self):
"""
Get the current Brat configuration.
Output :
Dict containing "entities" and "relations" configurations.
"""
if (os.path.isfile(self.folder+self.conf_file)):
# Reading file
file = open(self.folder+self.conf_file)
conf_str = file.read()
file.close()
# Splitting conf_str
conf_data = re.split(re.compile(r"\[[a-zA-Z]+\]", re.DOTALL), conf_str)[1:]
data = {}
# Reading entities
data["entities"] = self._writeEntitiesLevel(conf_data[0].split("\n"), {})[1]
# Reading relations
entitiesKeys = _getDictionnaryKeys(data["entities"])
data["relations"] = self._readRelations(conf_data[1], entitiesKeys)
return(data)
else:
self.write_conf()
self.read_conf()
def write_conf(self, entities = {}, relations = {}, events = {}, attributes = {}):
"""
Write or overwrite configuration file.
It currently does not support events and attributes configuration data.
inputs :
entities, dict : dict containing the entities. If an entity has children, its value is another dict; otherwise it is set to True.
relations, dict : dict containing the relations between entities; each key is a relation name and the value is a dict whose "args" key holds the list of related entities.
"""
# TODO : Add events and attributes support.
conf_str = ''
# Entities
conf_str += '\n\n[entities]'
conf_str += self._generateEntitiesStr(entities)
# relations
conf_str += '\n\n[relations]'
entitiesKeys = _getDictionnaryKeys(entities)
conf_str += self._writeRelations(relations, entitiesKeys)
# attributes
conf_str += '\n\n[attributes]'
# events
conf_str += '\n\n[events]'
# Write conf file
file = open(self.folder+self.conf_file,'w')
file.write(conf_str)
file.close()
def _getFileList(self):
# Listing files
filesDF = pd.DataFrame({'filename':pd.Series(os.listdir(self.folder))})
filesDFSplitted = filesDF["filename"].str.split(".", expand = True)
filesDF["id"] = filesDFSplitted[0]
filesDF["filetype"] = filesDFSplitted[1]
filesDF = filesDF[filesDF["filetype"].isin(["txt","ann"])]
return(filesDF)
def _parseData(self):
# Listing files
filesDF = self._getFileList()
# Getting data from txt and ann
filesDF_txt = filesDF.rename(columns = {"filename":"text_data"}).loc[filesDF["filetype"] == "txt", ["id","text_data"]]
filesDF_ann = filesDF.rename(columns = {"filename":"annotation"}).loc[filesDF["filetype"] == "ann", ["id","annotation"]]
dataDF = filesDF_txt.join(filesDF_ann.set_index("id"), on = "id")
dataDF["text_data"] = dataDF["text_data"].apply(lambda x: open(self.folder+x).read())
dataDF["annotation"] = dataDF["annotation"].apply(lambda x: open(self.folder+x).read())
return(dataDF)
def read_text(self):
"""
read_text
Get a pandas DataFrame containing the brat documents.
Input : None
Output : Pandas dataframe
"""
dataDF = self._parseData()
return(dataDF[["id","text_data"]])
def read_annotation(self, ids = []):
"""
read_annotation
Get annotations from the brat folder.
You can get specific annotations by filtering by id.
input :
ids, list (optional) : list of ids for which you want the annotation data; if empty, all annotations are returned.
output :
dict containing annotations and relations data.
"""
data = {}
data["annotations"] = pd.DataFrame(columns=self.emptyDFCols["annotations"])
data["relations"] = pd.DataFrame(columns=self.emptyDFCols["relations"])
dataDF = self._parseData()[["id","annotation"]]
dataDF = dataDF[(dataDF["annotation"].isna() == False) & (dataDF["annotation"] != '')] # Removing empty annotation
# Filtering by ids
if (len(ids) > 0):
dataDF = dataDF[dataDF["id"].isin(pd.Series(ids).astype(str))]
if (dataDF.shape[0] > 0):
# Ann data to pandas
dataDF = dataDF.join(dataDF["annotation"].str.split("\n").apply(pd.Series).stack().reset_index(level = 0).set_index("level_0")).reset_index(drop = True).drop("annotation", axis = 1).rename(columns = {0: "annotation"})
dataDF = dataDF[dataDF["annotation"].str.len() > 0].reset_index(drop = True)
dataDF = dataDF.join(dataDF["annotation"].str.split("\t", expand = True).rename(columns = {0: 'type_id', 1: 'data', 2: 'word'})).drop("annotation", axis = 1)
dataDF["type"] = dataDF["type_id"].str.slice(0,1)
## Annotations
data["annotations"] = dataDF[dataDF["type"] == 'T']
if (data["annotations"].shape[0] > 0):
data["annotations"] = data["annotations"].join(data["annotations"]["data"].str.split(" ", expand = True).rename(columns = {0: "label", 1: "start", 2: "end"})).drop(columns = ["data","type"])
## Relations
data["relations"] = dataDF[dataDF["type"] == 'R']
if (data["relations"].shape[0] > 0):
tmp_splitted = data["relations"]["data"].str.split(" ", expand = True).rename(columns = {0: "relation"})
### Col names
rename_dict = dict(zip(list(tmp_splitted.columns.values[1:]), list("Arg"+tmp_splitted.columns.values[1:].astype(str).astype(object))))
tmp_splitted = tmp_splitted.rename(columns = rename_dict)
### Merging data
tmp_splitted = tmp_splitted[["relation"]].join(tmp_splitted.loc[:,tmp_splitted.columns[tmp_splitted.columns != 'relation']].applymap(lambda x: x.split(":")[1]))
data["relations"] = data["relations"].join(tmp_splitted).drop(columns = ["data","type","word"])
return(data)
def _write_function(self, x, filetype = "txt", overwrite = False):
filenames = []
if (filetype == 'txt' or filetype == 'both'):
filenames.append(self.folder+str(x["filename"])+'.txt')
if (filetype == 'ann' or filetype == 'both'):
filenames.append(self.folder+str(x["filename"])+'.ann')
for filename in filenames:
try:
open(str(filename), "r")
is_file = True
except FileNotFoundError:
is_file = False
if ((is_file == False) or (overwrite == True)):
file = open(str(filename), "w")
file.write(x["content"])
file.close()
def write_text(self, text_id, text, empty = False, overWriteAnnotations = False):
"""
write_text
Send text data to the brat folder.
input :
text_id, pd.Series : pandas series containing document ids
text, pd.Series : pandas series containing document texts in the same order as text_id
empty, boolean : if True the brat folder is emptied of all but configuration data (text and ann files) before writing
overWriteAnnotations, boolean : if True, the current annotation files are replaced by blank ones
"""
if overWriteAnnotations == True: # Normalise how the overwrite flag is passed on
overwriteAnn = True
else:
overwriteAnn = False
if (type(text) == type(pd.Series()) and type(text_id) == type(pd.Series()) and text.shape[0] == text_id.shape[0]):
# ID check : ids should be shorter than the texts; check they were not inverted
if (text_id.astype(str).str.len().max() < text.astype(str).str.len().max()):
# empty : option to erase existing data
if (empty):
self._emptyData()
# Writing data
print("Writing data")
df_text = pd.DataFrame({"filename":text_id, "content":text})
df_ann = pd.DataFrame({"filename":text_id, "content":""})
df_text.apply(lambda x: self._write_function(x, filetype = "txt", overwrite = True), axis = 1)
df_ann.apply(lambda x: self._write_function(x, filetype = "ann", overwrite = overwriteAnn), axis = 1)
print("data written.")
else:
raise ValueError('ID is larger than text, maybe you inverted them.')
else:
raise ValueError('Incorrect variable type, expected two Pandas Series of same shape.')
def write_annotations(self, df, text_id, word, label, start, end, overwrite = False):
"""
write_annotations
Send annotation data to the brat folder. Useful to pre-annotate some data.
input :
df, pd.DataFrame : dataframe containing annotation data; should contain the text id, the annotated word, the annotated label, and the start and end offsets.
text_id, str : name of the column in df which contains the document id
word, str : name of the column in df which contains the annotated word
label, str : name of the column in df which contains the label of the annotated word
start, str : name of the column in df which contains the start offset
end, str : name of the column in df which contains the end offset
overwrite, boolean : if True, the current annotation files are replaced by the new data; otherwise, the new annotations are merged with the existing ones
"""
# Checking data types
if (type(df) == type(pd.DataFrame())):
# Loading df
df = df.rename(columns = {text_id:"id",word:"word",label:"label",start:"start",end:"end"})
df["type_id"] = df.groupby("id").cumcount()+1
# List of ids
ids = df["id"].unique()
# Loading current data
current_annotation = self.read_annotation(ids)
current_annotations = current_annotation["annotations"]
tmaxDFAnnotations = current_annotations.set_index(["id"])["type_id"].str.slice(1,).astype(int).reset_index().groupby("id").max().rename(columns = {"type_id":"Tmax"})
if (overwrite == True):
df["type_id"] = "T"+df["type_id"].astype(str)
new_annotations = df
else:
df = df.join(tmaxDFAnnotations, on = "id").fillna(0)
df["type_id"] = "T"+(df["type_id"]+df["Tmax"]).astype(int).astype(str)
df = df.drop(columns = ["Tmax"])
new_annotations = pd.concat((current_annotations, df[self.emptyDFCols["annotations"]])).reset_index(drop = True)
new_annotations.drop_duplicates() ## Removing duplicates
# Injecting new annotations
current_annotation["annotations"] = new_annotations
# Calling write function
self._write_annotation(current_annotation["annotations"], current_annotation["relations"])
else:
raise ValueError('Incorrect variable type, expected a Pandas DF.')
def write_relations(self, df, text_id, relation, overwrite = False):
"""
write_relations
Send relations data to the brat folder. Useful to pre-annotate some data.
input :
df, pd.DataFrame : dataframe containing relations data; should contain the text id, the relation name and the ids of the linked annotations.
text_id, str : name of the column in df which contains the document id
relation, str : name of the column in df which contains the relation name
overwrite, boolean : if True, the current annotation files are replaced by the new data; otherwise, the new relations are merged with the existing ones
The other columns should contain the type_id of the related entities, as output by the read_annotation method.
"""
# Checking data types
if (type(df) == type(pd.DataFrame())):
# Loading df
df = df.rename(columns = {text_id:"id",relation:"relation"})
df["type_id"] = df.groupby("id").cumcount()+1 # type_id
# Columns names
old_columns = df.columns[np.isin(df.columns, ["id", "relation","type_id"]) == False]
new_columns = "Arg"+np.array(list(range(1,len(old_columns)+1))).astype(str).astype(object)
df = df.rename(columns = dict(zip(old_columns, new_columns)))
# List of ids
ids = df["id"].unique()
# Loading current data
current_annotation = self.read_annotation(ids)
current_relations = current_annotation["relations"]
rmaxDFrelations = current_relations.set_index(["id"])["type_id"].str.slice(1,).astype(int).reset_index().groupby("id").max().rename(columns = {"type_id":"Rmax"})
if (overwrite == True):
df["type_id"] = "R"+df["type_id"].astype(str)
new_relations = df
else:
df = df.join(rmaxDFrelations, on = "id").fillna(0)
df["type_id"] = "R"+(df["type_id"]+df["Rmax"]).astype(int).astype(str)
df = df.drop(columns = ["Rmax"])
# Adding missing columns
if (len(df.columns) > len(current_relations.columns)):
for column in df.columns[np.isin(df.columns, current_relations.columns) == False]:
current_relations[column] = np.nan
else:
for column in current_relations.columns[np.isin(current_relations.columns, df.columns) == False]:
df[column] = np.nan
new_relations = | pd.concat((current_relations, df[current_relations.columns])) | pandas.concat |
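# Standalone sketch of the per-document ID generation used by
# write_annotations above (the rows are made-up examples): each document gets
# running T<n> identifiers via groupby().cumcount().
import pandas as pd

ann = pd.DataFrame({"id": [1, 1, 2],
                    "word": ["fever", "cough", "rash"],
                    "label": ["Symptom", "Symptom", "Symptom"],
                    "start": [0, 10, 0], "end": [5, 15, 4]})
ann["type_id"] = "T" + (ann.groupby("id").cumcount() + 1).astype(str)
print(ann[["id", "type_id", "word", "label"]])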
import os
import torch
from tqdm import tqdm
import argparse
import multiprocessing as mp
import pandas as pd
from moses.models_storage import ModelsStorage
from moses.metrics.utils import average_agg_tanimoto, fingerprints, fingerprint
from rdkit import DataStructs, Chem
from scipy.spatial.distance import jaccard
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True)
parser.add_argument("--lbann-weights-dir", required=True)
parser.add_argument("--lbann-load-epoch", type=int, required=True)
parser.add_argument("--lbann-load-step", type=int, required=True)
parser.add_argument(
"--vocab-path", type=str, default="", help="path to experiment vocabulary"
)
parser.add_argument("--num-layers", type=int)
parser.add_argument("--dropout", type=float)
parser.add_argument("--weight-prefix")
parser.add_argument("--n-samples", type=int, default=100)
parser.add_argument("--max-len", type=int, default=100)
parser.add_argument("--n-batch", type=int, default=10)
parser.add_argument("--gen-save", required=True)
parser.add_argument("--test-path", required=True)
parser.add_argument("--test-scaffolds-path")
parser.add_argument("--ptest-path")
parser.add_argument("--ptest-scaffolds-path")
parser.add_argument("--ks", type=int, nargs="+", help="list with values for unique@k. Will calculate number of unique molecules in the first k molecules.")
parser.add_argument("--n-jobs", type=int, default=mp.cpu_count()-1)
parser.add_argument("--gpu", type=int, help=" index of GPU for FCD metric and internal diversity, -1 means use CPU")
parser.add_argument("--batch-size", type=int, help="batch size for FCD metric")
parser.add_argument("--hidden", type=int)
parser.add_argument("--metrics", help="output path to store metrics")
parser.add_argument("--model-config", help="path to model configuration dict")
######################################
# These are things specific to the VAE
######################################
#parser.add_argument("--freeze-embeddings", action="store_true") # this turns off grad accumulation for embedding layer (see https://github.com/samadejacobs/moses/blob/master/moses/vae/model.py#L22)
#parser.add_argument("--q-cell", default="gru")
parser.add_argument("--seed-molecules", help="points to a file with molecules to use as the reference points in the experiment", required=True)
parser.add_argument("--k-neighbor-samples", help="number of neighbors to draw from the gaussian ball", type=int, required=True)
parser.add_argument("--scale-factor", help="scale factor (std) for gaussian", type=float, required=True)
parser.add_argument("--output", help="path to save output results", required=True)
model_config = parser.parse_args()
moses_config_dict = torch.load(model_config.model_config)
def load_model():
MODELS = ModelsStorage()
model_vocab = torch.load(model_config.vocab_path)
model = MODELS.get_model_class(model_config.model)(model_vocab, moses_config_dict)
# load the model
assert os.path.exists(model_config.lbann_weights_dir) is not None
weights_prefix = f"{model_config.lbann_weights_dir}/{model_config.weight_prefix}"
model.load_lbann_weights(model_config.lbann_weights_dir, epoch_count=model_config.lbann_load_epoch)
model.cuda()
model.eval()
return model
def sample_noise_add_to_vec(latent_vec, scale_factor=model_config.scale_factor):
noise = torch.normal(mean=0, std=torch.ones(latent_vec.shape)*scale_factor).numpy()
#print(noise)
return latent_vec + noise
def main(k=model_config.k_neighbor_samples):
model = load_model()
input_smiles_list = pd.read_csv(model_config.seed_molecules, header=None)[0].to_list()
#import ipdb
#ipdb.set_trace()
reference_latent_vec_list, reference_smiles_list = model.encode_smiles(input_smiles_list)
reference_latent_vec_list = [x.cpu().unsqueeze(0).numpy() for x in reference_latent_vec_list]
result_list = []
for reference_latent_vec, reference_smiles in tqdm(zip(reference_latent_vec_list, reference_smiles_list), desc="sampling neighbors for reference vec and decoding", total=len(reference_latent_vec_list)):
# TODO: this is just for debugging
#input_fp = fingerprint(input_smiles, fp_type='morgan')
#reference_mol = Chem.MolFromSmiles(reference_smiles)
neighbor_smiles_list = [model.decode_smiles(sample_noise_add_to_vec(reference_latent_vec))[0]['SMILES'][0] for i in range(k)]
neighbor_fps = [fingerprint(neighbor_smiles, fp_type='morgan') for neighbor_smiles in neighbor_smiles_list]  # note: the fingerprints() helper has a bug that references first_fp before assignment...
reference_fp = fingerprint(reference_smiles, fp_type='morgan')
neighbor_tani_list = [jaccard(reference_fp, neighbor_fp) for neighbor_fp in neighbor_fps]
neighbor_valid_list = [x for x in [Chem.MolFromSmiles(smiles) for smiles in neighbor_smiles_list] if x is not None]
result_list.append({"reference_smiles": reference_smiles, "mean_tani_sim": np.mean(neighbor_tani_list), "min_tani_sim": np.min(neighbor_tani_list), "max_tani_sim": np.max(neighbor_tani_list), "valid_rate": len(neighbor_valid_list)/k })
| pd.DataFrame(result_list) | pandas.DataFrame |
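# Minimal sketch of how the per-molecule results above can be collected; the
# SMILES string and similarity numbers are placeholder values.
import pandas as pd

result_list = [{"reference_smiles": "CCO", "mean_tani_sim": 0.41,
                "min_tani_sim": 0.22, "max_tani_sim": 0.67, "valid_rate": 0.9}]
results = pd.DataFrame(result_list)
print(results.describe())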
import pandas as pd
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
def textPreprocessing(df):
stop = stopwords.words('english')
df['text'] = df['text'].str.replace('[^\w\s]', '')
df['text'] = df['text'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
df['text'] = df['text'].apply(lambda x: " ".join(x.lower() for x in x.split()))
stemmer = PorterStemmer()
df['text'] = df['text'].apply(lambda x: " ".join([stemmer.stem(word) for word in x.split()]))
return df
def loadDataset():
reviews0 = pd.read_csv('Analytics/reviews/reviews0.csv')
reviews1 = pd.read_csv('Analytics/reviews/reviews1.csv')
reviews2 = pd.read_csv('Analytics/reviews/reviews2.csv')
reviews3 = pd.read_csv('Analytics/reviews/reviews3.csv')
reviews4 = pd.read_csv('Analytics/reviews/reviews4.csv')
reviews5 = pd.read_csv('Analytics/reviews/reviews5.csv')
reviews6 = pd.read_csv('Analytics/reviews/reviews6.csv')
reviews7 = pd.read_csv('Analytics/reviews/reviews7.csv')
reviews = [reviews0, reviews1, reviews2, reviews3, reviews4, reviews5, reviews6, reviews7]
df = pd.concat(reviews, ignore_index=True)
return df
if __name__ == '__main__':
# Load dataset
df = loadDataset()
# Keep text and username
df = df[['username', 'text']]
# group by username & merge reviews text to a unified corpus
df = df.groupby('username').agg({
'text': lambda x: ' '.join(x)
}).reset_index()
# Text Pre-Processing
df = textPreprocessing(df)
# Import demographics dataset
demographics = pd.read_csv('Analytics/demographics.csv')
# Keep only username and gender
demographics = demographics[['username', 'gender']]
# Merge the two dataframes
dataframe = | pd.merge(df, demographics, on='username') | pandas.merge |
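# Standalone sketch of the aggregation/merge pattern above with made-up rows:
# concatenate each user's review texts, then attach gender by username.
import pandas as pd

reviews = pd.DataFrame({"username": ["ann", "ann", "bob"],
                        "text": ["great food", "slow service", "loved it"]})
corpus = reviews.groupby("username").agg({"text": lambda x: " ".join(x)}).reset_index()
demographics = pd.DataFrame({"username": ["ann", "bob"], "gender": ["f", "m"]})
merged = pd.merge(corpus, demographics, on="username")
print(merged)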
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class Visualizer:
def __init__(self, action_labels):
self.n_action = len(action_labels)
self.action_labels = action_labels
def visualise_episode(self, env, cum_rewards, actions, pqs, ideal, fig_path):
_, (ax_price, ax_action, ax_Q) = plt.subplots(3, 1, sharex='all', figsize=(14, 14))
p = env.price_df.price.values - env.price_df.price.values[-1]
ax_price.plot(p, 'k-', label='prices')
ax_price.plot(cum_rewards, 'b', label='P&L')
ax_price.plot(ideal, 'r', label='ideal P&L')
ax_price.legend(loc='best', frameon=False)
ax_price.set_title(env.title + f', explored: {cum_rewards[-1]}, median ideal: {np.nanmedian(ideal)}')
ax_action.set_title('Actions: cash=0, open=1, close=2')
ax_action.plot(actions, 'b', label='explored')
ax_action.set_ylim(-0.4, self.n_action - 0.6)
ax_action.set_ylabel('action')
ax_action.set_yticks(range(self.n_action))
ax_action.legend(loc='best', frameon=False)
styles = ['k', 'r', 'b']
qs_df = | pd.DataFrame(pqs, columns=['cash', 'open', 'keep']) | pandas.DataFrame |
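# Small sketch of the Q-value plotting step above using synthetic numbers in
# place of real agent output.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

pqs = np.random.rand(50, 3)                      # placeholder Q-values
qs_df = pd.DataFrame(pqs, columns=['cash', 'open', 'keep'])
ax = qs_df.plot(style=['k', 'r', 'b'])
ax.set_ylabel('Q value')
plt.close(ax.figure)                             # close instead of show when running headless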
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = | date_range('20130101', periods=3) | pandas.date_range |
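# Quick illustrative sketch (outside the test suite) of the dti - dti
# behaviour tested above: subtracting two DatetimeIndexes returns a
# TimedeltaIndex rather than performing a set operation.
import pandas as pd

left = pd.date_range('20130101', periods=3)
right = pd.date_range('20130102', periods=3)
print(left - right)      # TimedeltaIndex(['-1 days', '-1 days', '-1 days'], ...)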
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import copy
import math
from shapely.geometry.polygon import Polygon
# A shared random state ensures that the data is split the same way in both the train and test functions
RANDOM_STATE = 42
def load_tabular_features_hadoop(distribution='all', matched=False, scale='all', minus_one=False):
tabular_path = 'data/join_results/train/join_cardinality_data_points_sara.csv'
print(tabular_path)
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
if distribution != 'all':
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains('_{}'.format(distribution))]
if matched:
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains('_Match')]
if scale != all:
tabular_features_df = tabular_features_df[tabular_features_df['label'].str.contains(scale)]
if minus_one:
tabular_features_df['join_sel'] = 1 - tabular_features_df['join_sel']
tabular_features_df = tabular_features_df.drop(columns=['label', 'coll1', 'D1', 'coll2', 'D2'])
tabular_features_df = tabular_features_df.rename(columns={x: y for x, y in zip(tabular_features_df.columns, range(0, len(tabular_features_df.columns)))})
# Get train and test data
train_data, test_data = train_test_split(tabular_features_df, test_size=0.20, random_state=RANDOM_STATE)
num_features = len(tabular_features_df.columns) - 1
X_train = pd.DataFrame.to_numpy(train_data[[i for i in range(num_features)]])
y_train = train_data[num_features]
X_test = pd.DataFrame.to_numpy(test_data[[i for i in range(num_features)]])
y_test = test_data[num_features]
return X_train, y_train, X_test, y_test
def load_tabular_features(join_result_path, tabular_path, normalize=False, minus_one=False, target='join_selectivity'):
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration']
join_df = pd.read_csv(join_result_path, delimiter=',', header=None, names=cols)
join_df = join_df[join_df.result_size != 0]
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset1', right_on='dataset_name')
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset2', right_on='dataset_name')
cardinality_x = join_df['cardinality_x']
cardinality_y = join_df['cardinality_y']
result_size = join_df['result_size']
mbr_tests = join_df['mbr_tests']
# x1_x, y1_x, x2_x, y2_x, x1_y, y1_y, x2_y, y2_y = join_df['x1_x'], join_df['y1_x'], join_df['x2_x'], join_df['y2_x'], join_df['x1_y'], join_df['y1_y'], join_df['x2_y'], join_df['y2_y']
# # Compute intersection area 1, intersection area 2 and area similarity
# intersect_x1 = pd.concat([x1_x, x1_y]).max(level=0)
# intersect_y1 = max(y1_x, y1_y)
# intersect_x2 = min(x2_x, x2_y)
# intersect_y2 = min(y2_x, y2_y)
# print(intersect_x1)
if minus_one:
join_selectivity = 1 - result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = 1 - mbr_tests / (cardinality_x * cardinality_y)
else:
join_selectivity = result_size / (cardinality_x * cardinality_y)
mbr_tests_selectivity = mbr_tests / (cardinality_x * cardinality_y)
join_df = join_df.drop(
columns=['result_size', 'dataset1', 'dataset2', 'dataset_name_x', 'dataset_name_y', 'mbr_tests', 'duration'])
if normalize:
column_groups = [
['AVG area_x', 'AVG area_y'],
['AVG x_x', 'AVG y_x', 'AVG x_y', 'AVG y_y'],
['E0_x', 'E2_x', 'E0_y', 'E2_y'],
['cardinality_x', 'cardinality_y'],
]
for column_group in column_groups:
input_data = join_df[column_group].to_numpy()
original_shape = input_data.shape
reshaped = input_data.reshape(input_data.size, 1)
reshaped = preprocessing.minmax_scale(reshaped)
join_df[column_group] = reshaped.reshape(original_shape)
# Rename the column's names to numbers for easier access
join_df = join_df.rename(columns={x: y for x, y in zip(join_df.columns, range(0, len(join_df.columns)))})
# Save the number of features in order to extract (X, y) correctly
num_features = len(join_df.columns)
# Append the target to the right of data frame
join_df.insert(len(join_df.columns), 'join_selectivity', join_selectivity, True)
join_df.insert(len(join_df.columns), 'mbr_tests_selectivity', mbr_tests_selectivity, True)
# TODO: delete this dumping action. This is just for debugging
join_df.to_csv('data/temp/join_df.csv')
# Split join data to train and test data
# target = 'join_selectivity'
train_data, test_data = train_test_split(join_df, test_size=0.20, random_state=RANDOM_STATE)
X_train = pd.DataFrame.to_numpy(train_data[[i for i in range(num_features)]])
y_train = train_data[target]
X_test = pd.DataFrame.to_numpy(test_data[[i for i in range(num_features)]])
y_test = test_data[target]
return X_train, y_train, X_test, y_test
def generate_tabular_features(join_result_path, tabular_path, output, normalize=False, minus_one=False):
tabular_features_df = pd.read_csv(tabular_path, delimiter='\\s*,\\s*', header=0)
cols = ['dataset1', 'dataset2', 'result_size', 'mbr_tests', 'duration', 'best_algorithm']
join_df = pd.read_csv(join_result_path, delimiter=',', header=None, names=cols)
best_algorithm = join_df['best_algorithm']
join_df = join_df[join_df.result_size != 0]
join_df = pd.merge(join_df, tabular_features_df, left_on='dataset1', right_on='dataset_name')
join_df = | pd.merge(join_df, tabular_features_df, left_on='dataset2', right_on='dataset_name') | pandas.merge |
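# Minimal sketch of the double-merge step above: attach per-dataset features
# to both sides of every join pair (all values are placeholders).
import pandas as pd

features = pd.DataFrame({"dataset_name": ["d1", "d2"], "cardinality": [100, 250]})
joins = pd.DataFrame({"dataset1": ["d1"], "dataset2": ["d2"], "result_size": [40]})
joins = pd.merge(joins, features, left_on="dataset1", right_on="dataset_name")
joins = pd.merge(joins, features, left_on="dataset2", right_on="dataset_name",
                 suffixes=("_x", "_y"))
print(joins[["dataset1", "dataset2", "cardinality_x", "cardinality_y"]])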
import recordlinkage
import pandas as pd
import csv
import re
import pymongo
from pymongo import MongoClient
#path to our datasets
ORIGINAL = "restaurants.tsv"
DUPLICATES = "restaurants_DPL.tsv"
#parse to tsv files into a dataframe
df = | pd.read_csv(ORIGINAL, sep='\t') | pandas.read_csv |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sdc
import unittest
from itertools import product
from sdc.str_arr_ext import StringArray
from sdc.str_ext import std_str_to_unicode, unicode_to_std_str
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import skip_numba_jit
from sdc.functions import numpy_like
from sdc.functions import sort
class TestArrays(TestCase):
def test_astype_to_num(self):
def ref_impl(a, t):
return a.astype(t)
def sdc_impl(a, t):
return numpy_like.astype(a, t)
sdc_func = self.jit(sdc_impl)
cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan]]
cases_type = [np.float64, np.int64, 'float64', 'int64']
for case in cases:
a = np.array(case)
for type_ in cases_type:
with self.subTest(data=case, type=type_):
np.testing.assert_array_equal(sdc_func(a, type_), ref_impl(a, type_))
def test_astype_to_float(self):
def ref_impl(a):
return a.astype('float64')
def sdc_impl(a):
return numpy_like.astype(a, 'float64')
sdc_func = self.jit(sdc_impl)
cases = [[2, 3, 0], [4., 5.6, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_astype_to_int(self):
def ref_impl(a):
return a.astype(np.int64)
def sdc_impl(a):
return numpy_like.astype(a, np.int64)
sdc_func = self.jit(sdc_impl)
cases = [[2, 3, 0], [4., 5.6, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_astype_int_to_str(self):
def ref_impl(a):
return a.astype(str)
def sdc_impl(a):
return numpy_like.astype(a, str)
sdc_func = self.jit(sdc_impl)
a = np.array([2, 3, 0])
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
@unittest.skip('Numba converts float to string with incorrect precision')
def test_astype_float_to_str(self):
def ref_impl(a):
return a.astype(str)
def sdc_impl(a):
return numpy_like.astype(a, str)
sdc_func = self.jit(sdc_impl)
a = np.array([4., 5.6, np.nan])
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_astype_num_to_str(self):
def ref_impl(a):
return a.astype('str')
def sdc_impl(a):
return numpy_like.astype(a, 'str')
sdc_func = self.jit(sdc_impl)
a = np.array([5, 2, 0, 333, -4])
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
@unittest.skip('Needs Numba astype impl support converting unicode_type to other type')
def test_astype_str_to_num(self):
def ref_impl(a, t):
return a.astype(t)
def sdc_impl(a, t):
return numpy_like.astype(a, t)
sdc_func = self.jit(sdc_impl)
cases = [['a', 'cc', 'd'], ['3.3', '5', '.4'], ['¡Y', 'tú quién ', 'te crees']]
cases_type = [np.float64, np.int64]
for case in cases:
a = np.array(case)
for type_ in cases_type:
with self.subTest(data=case, type=type_):
np.testing.assert_array_equal(sdc_func(a, type_), ref_impl(a, type_))
def test_isnan(self):
def ref_impl(a):
return np.isnan(a)
def sdc_impl(a):
return numpy_like.isnan(a)
sdc_func = self.jit(sdc_impl)
cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
@unittest.skip('Needs provide String Array boxing')
def test_isnan_str(self):
def ref_impl(a):
return np.isnan(a)
def sdc_impl(a):
return numpy_like.isnan(a)
sdc_func = self.jit(sdc_impl)
cases = [['a', 'cc', np.nan], ['se', None, 'vvv']]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_notnan(self):
def ref_impl(a):
return np.invert(np.isnan(a))
def sdc_impl(a):
return numpy_like.notnan(a)
sdc_func = self.jit(sdc_impl)
cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_copy(self):
from sdc.str_arr_ext import StringArray
def ref_impl(a):
return np.copy(a)
@self.jit
def sdc_func(a):
_a = StringArray(a) if as_str_arr == True else a # noqa
return numpy_like.copy(_a)
cases = {
'int': [5, 2, 0, 333, -4],
'float': [3.3, 5.4, np.nan, 7.9, np.nan],
'bool': [True, False, True],
'str': ['a', 'vv', 'o12oo']
}
for dtype, data in cases.items():
a = data if dtype == 'str' else np.asarray(data)
as_str_arr = True if dtype == 'str' else False
with self.subTest(case=data):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_copy_int(self):
def ref_impl():
a = np.array([5, 2, 0, 333, -4])
return np.copy(a)
def sdc_impl():
a = np.array([5, 2, 0, 333, -4])
return numpy_like.copy(a)
sdc_func = self.jit(sdc_impl)
np.testing.assert_array_equal(sdc_func(), ref_impl())
def test_copy_bool(self):
def ref_impl():
a = np.array([True, False, True])
return np.copy(a)
def sdc_impl():
a = np.array([True, False, True])
return numpy_like.copy(a)
sdc_func = self.jit(sdc_impl)
np.testing.assert_array_equal(sdc_func(), ref_impl())
@unittest.skip("Numba doesn't have string array")
def test_copy_str(self):
def ref_impl():
a = np.array(['a', 'vv', 'o12oo'])
return np.copy(a)
def sdc_impl():
a = np.array(['a', 'vv', 'o12oo'])
return numpy_like.copy(a)
sdc_func = self.jit(sdc_impl)
np.testing.assert_array_equal(sdc_func(), ref_impl())
def test_argmin(self):
def ref_impl(a):
return np.argmin(a)
def sdc_impl(a):
return numpy_like.argmin(a)
sdc_func = self.jit(sdc_impl)
cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_argmax(self):
def ref_impl(a):
return np.argmax(a)
def sdc_impl(a):
return numpy_like.argmax(a)
sdc_func = self.jit(sdc_impl)
cases = [[np.nan, np.nan, np.inf, np.nan], [5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_nanargmin(self):
def ref_impl(a):
return np.nanargmin(a)
def sdc_impl(a):
return numpy_like.nanargmin(a)
sdc_func = self.jit(sdc_impl)
cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_nanargmax(self):
def ref_impl(a):
return np.nanargmax(a)
def sdc_impl(a):
return numpy_like.nanargmax(a)
sdc_func = self.jit(sdc_impl)
cases = [[np.nan, np.nan, np.inf, np.nan], [5, 2, -9, 333, -4], [3.3, 5.4, np.nan, 7.9]]
for case in cases:
a = np.array(case)
with self.subTest(data=case):
np.testing.assert_array_equal(sdc_func(a), ref_impl(a))
def test_sort(self):
np.random.seed(0)
def ref_impl(a):
return np.sort(a)
def sdc_impl(a):
sort.parallel_sort(a)
return a
sdc_func = self.jit(sdc_impl)
float_array = np.random.ranf(10**2)
int_array = np.random.randint(0, 127, 10**2)
float_cases = ['float32', 'float64']
for case in float_cases:
array0 = float_array.astype(case)
array1 = np.copy(array0)
with self.subTest(data=case):
np.testing.assert_array_equal(ref_impl(array0), sdc_func(array1))
int_cases = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
for case in int_cases:
array0 = int_array.astype(case)
array1 = np.copy(array0)
with self.subTest(data=case):
np.testing.assert_array_equal(ref_impl(array0), sdc_func(array1))
def test_stable_sort(self):
np.random.seed(0)
def ref_impl(a):
return np.sort(a)
def sdc_impl(a):
sort.parallel_stable_sort(a)
return a
sdc_func = self.jit(sdc_impl)
float_array = np.random.ranf(10**2)
int_array = np.random.randint(0, 127, 10**2)
float_cases = ['float32', 'float64']
for case in float_cases:
array0 = float_array.astype(case)
array1 = np.copy(array0)
with self.subTest(data=case):
np.testing.assert_array_equal(ref_impl(array0), sdc_func(array1))
int_cases = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']
for case in int_cases:
array0 = int_array.astype(case)
array1 = np.copy(array0)
with self.subTest(data=case):
np.testing.assert_array_equal(ref_impl(array0), sdc_func(array1))
def _test_fillna_numeric(self, pyfunc, cfunc, inplace):
data_to_test = [
[True, False, False, True, True],
[5, 2, 0, 333, -4],
[3.3, 5.4, 7.9],
[3.3, 5.4, np.nan, 7.9, np.nan],
]
values_to_test = [
None,
np.nan,
2.1,
2
]
for data, value in product(data_to_test, values_to_test):
a1 = np.asarray(data)
a2 = pd.Series(np.copy(a1)) if inplace else pd.Series(a1)
with self.subTest(data=data, value=value):
result = cfunc(a1, value)
result_ref = pyfunc(a2, value)
if inplace:
result, result_ref = a1, a2
np.testing.assert_array_equal(result, result_ref)
def test_fillna_numeric_inplace_false(self):
def ref_impl(S, value):
if value is None:
return S.values.copy()
else:
return S.fillna(value=value, inplace=False).values
def sdc_impl(a, value):
return numpy_like.fillna(a, inplace=False, value=value)
sdc_func = self.jit(sdc_impl)
self._test_fillna_numeric(ref_impl, sdc_func, inplace=False)
def test_fillna_numeric_inplace_true(self):
def ref_impl(S, value):
if value is None:
return None
else:
S.fillna(value=value, inplace=True)
return None
def sdc_impl(a, value):
return numpy_like.fillna(a, inplace=True, value=value)
sdc_func = self.jit(sdc_impl)
self._test_fillna_numeric(ref_impl, sdc_func, inplace=True)
def test_fillna_str_inplace_false(self):
def ref_impl(S, value):
if value is None:
return S.values.copy()
else:
return S.fillna(value=value, inplace=False).values
def sdc_impl(S, value):
str_arr = S.values
return numpy_like.fillna(str_arr, inplace=False, value=value)
sdc_func = self.jit(sdc_impl)
data_to_test = [
['a', 'b', 'c', 'd'],
['a', 'b', None, 'c', None, 'd'],
]
values_to_test = [
None,
'',
'asd'
]
for data, value in product(data_to_test, values_to_test):
S = | pd.Series(data) | pandas.Series |
# Reference: https://learndataanalysis.org/how-to-download-photos-from-google-photos-in-python/
import os
from Google import Create_Service
import pandas as pd # pip install pandas
import requests # pip install requests
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 150)
| pd.set_option('display.max_colwidth', 150) | pandas.set_option |
#!/usr/bin/env python -B
#==============================================================================
#title :phenorank.py
#description :main function for running PhenoRank
#author :<NAME>
#date_created :12 May 2015
#version :0.1.2
#usage :
#python_version :2.7.9
#==============================================================================
import cPickle
import logging
import numpy as np
import pandas as pd
import random
import pkg_resources
import scipy.sparse as sp
import scipy.stats as ss
import sys
import inout
import scoring
def run_phenorank(omim_obs, phenotypes_obs=None, nperm=1000, r=0.1, ni=20, gene_mask=None, include_h=True, include_m=True, dir_data="data_phenorank", filename_output="~"):
"""
run the main PhenoRank function
Args:
omim_obs: OMIM ID for the disease of interest
phenotypes_obs: a list of phenotypes describing the disease of interest
nperm: number of permutations to complete
r: the restart probability for the RWR method
ni: the iteration cutoff for the RWR method
gene_mask: gene to mask, if None no gene is masked
include_h, include_m: logical values denoting whether human and mouse data should be included
dir_data: path to the data, used for testing
Returns:
a pandas DataFrame of PhenoRank scores and p-values for each gene
"""
# create logger if required
logger = logging.getLogger("__main__")
# log algorithm progress
logger.info("")
logger.info("# Algorithm progress")
logger.info("Importing data...")
# import data
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/phenorank_genes.tsv"), "r")
genes = list(pd.read_csv(con, header=None)[0])
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/phenorank_conditions.tsv"), "r")
omims = list(pd.read_csv(con, header=None)[0])
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/phenorank_phenotypes.tsv"), "r")
phenotypes = list(pd.read_csv(con, header=None)[0])
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/cp_h_omim.tsv"), "r")
cp_h = inout.import_dictionary(con, split_by="|", key_int=True, value_int=True)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/gc_h.pickle"), "r")
gc_h = cPickle.load(con)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/gc_m.pickle"), "r")
gc_m = cPickle.load(con)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/phenotype_ancestors.tsv"), "r")
pheno_ancestors = inout.import_dictionary(con, split_by="|", key_int=True, value_int=True)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/phenotype_ic.tsv"), "r")
pheno_ic = np.array(list(pd.read_csv(con, header=None)[0]))
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/condition_ic.tsv"), "r")
omim_ic = np.array(list(pd.read_csv(con, header=None)[0]))
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/pheno_condition_ic_matrix.pickle"), "r")
pheno_omim_ic_matrix = cPickle.load(con)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/pheno_cooccur.pickle"), "r")
pheno_cooccur = cPickle.load(con)
con.close()
con = open(pkg_resources.resource_filename("phenorank", dir_data + "/W_generic.pickle"), "r")
W = cPickle.load(con)
con.close()
# check when input is acceptable
if omim_obs not in omims:
raise Exception("disease " + omim_obs + " not accepted by PhenoRank")
# get the indices of input genes, diseases and phenotypes
logger.info("Indexing genes, diseases and phenotypes to consider...")
if gene_mask:
gene_mask_ind = genes.index(gene_mask)
if omim_obs:
omim_obs_ind = omims.index(omim_obs)
if phenotypes_obs:
phenotypes_obs_ind = [phenotypes.index(phenotype) for phenotype in phenotypes_obs]
else:
phenotypes_obs_ind = cp_h[omim_obs_ind]
# if a gene to mask has been specified, and if it is associated with the OMIM ID, mask
if gene_mask:
gc_h[gene_mask_ind, omim_obs_ind] = 0
# score genes using updated phenotypes
logger.info("Scoring genes for query disease...")
score_unprop, score_unranked_prop, score_obs = scoring.score_genes(phenotypes_obs_ind, pheno_ancestors, pheno_ic, omim_ic, pheno_omim_ic_matrix, gc_h, gc_m, W, r, ni, include_h, include_m)
# score genes using permuted phenotypes
score_sim = np.empty((nperm, len(score_obs))) # empty array to hold scores
for n in range(nperm):
pheno_sim_ind = scoring.simulate_disease(len(phenotypes_obs_ind), pheno_cooccur)
tmp, tmp, score_sim[n] = scoring.score_genes(pheno_sim_ind, pheno_ancestors, pheno_ic, omim_ic, pheno_omim_ic_matrix, gc_h, gc_m, W, r, ni, include_h, include_m)
if n != 0 and (n + 1) % 100 == 0: logger.info("Scoring genes for simulated sets of disease phenotype terms ({}/{} sets completed)...".format(n + 1, nperm))
# compute p-values
logger.info("Computing p-values...")
pvalues = sum(score_sim >= score_obs) / float(nperm)
pvalues[pvalues == 0.0] = 1.0 / nperm # ensure that no p-values equal 0
# format output
logger.info("Formatting results...")
res = pd.DataFrame({"GENE": pd.Series(genes, index=genes), "SCORE_UNRANKED_UNPROP": pd.Series(score_unprop, index=genes), "SCORE_UNRANKED_PROP": | pd.Series(score_unranked_prop, index=genes) | pandas.Series |
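# A standalone illustration (using the numpy import above) of the empirical
# permutation p-value computed in run_phenorank: each gene's observed score is
# compared against its scores under permuted phenotype sets, and zero p-values
# are floored at 1/nperm so no gene is assigned an exact zero.
def _empirical_pvalues(score_obs, score_sim):
    nperm = score_sim.shape[0]
    pvals = (score_sim >= score_obs).sum(axis=0) / float(nperm)
    pvals[pvals == 0.0] = 1.0 / nperm
    return pvals
# e.g. _empirical_pvalues(np.array([0.9, 0.1]), np.random.rand(1000, 2))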
# © All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE,
# Switzerland, Laboratory of Experimental Biophysics, 2016
# See the LICENSE.txt file for more details.
import pandas as pd
import trackpy as tp
import numpy as np
import matplotlib.pyplot as plt
import re
from abc import ABCMeta, abstractmethod, abstractproperty
from sklearn.cluster import DBSCAN
from operator import *
from scipy.signal import gaussian
from scipy.ndimage import filters
from scipy.interpolate import UnivariateSpline, interp1d
from scipy.optimize import minimize
from matplotlib.widgets import RectangleSelector
from bstore import config
from bstore.parsers import FormatMap
import warnings
__version__ = config.__bstore_Version__
"""Metaclasses
-------------------------------------------------------------------------------
"""
class ComputeTrajectories(metaclass=ABCMeta):
"""Basic functionality for computing trajectories from localizations.
This is used to compute trajectories from regions of a dataset containing
localizations, such as fiducial drift trajectories (position vs. frame
number) or astigmatic calibration curves (PSF width vs. z).
Attributes
----------
regionLocs : Pandas DataFrame
The localizations for individual regions.
"""
def __init__(self):
"""Initializes the trajectory computer.
"""
self._regionData = None
def _plotCurves(self, curveNumber=None, coordCols=['x', 'y'],
horizontalLabels=['', 'time'], verticalLabels=['x', 'y'],
title='trajectories', splineCols=['t','x','y'],
offsets=[0,0], ylims=[-100, 500, -100, 500]):
"""Make a plot of each region's trajectory and the average spline fit.
plotCurves allows the user to check the trajectories of localizations
and their fits against the average spline fit.
Parameters
----------
curveNumber : int
Index of the spline to plot. (0-index)
coordCols : list of string
The column names corresponding to the trajectory's dependent
variable (e.g. time or z-position) and the localizations' x- and
y-coordinates (order is t, x, y).
horizontalLabels : list of string
The labels for the x-axes of each trajectory plot.
verticalLabels : list of string
The labels for the y-axes for each trajectory (order is
x-trajectory, then y).
title : str
The title of each plot.
splineCols : list of str
The column names of the average spline DataFrame that correspond to
the trajectory's dependent variable (i.e. z-position or frame
number,) the localizations' x-coordinates, and the localizaitons'
y-coordinates, respectively.
offsets : list of int
The vertical offsets to apply to the curves.
ylims : list of float
The y-limits of the two trajectory plots (order is min and max of
x-trajectory, then min and max of the y-trajectory).
"""
t, x, y = coordCols
xHorzLabel, yHorzLabel = horizontalLabels
xVertLabel, yVertLabel = verticalLabels
ts, xs, ys = splineCols
x0, y0 = offsets
minxy, maxxy, minyy, maxyy = ylims
if self.regionLocs is None:
raise ZeroRegions(
'Zero regions are currently saved with this processor.')
fig, (axx, axy) = plt.subplots(nrows=2, ncols=1, sharex=True)
locs = self.regionLocs.xs(curveNumber, level='region_id',
drop_level=False)
# Filter out localizations that are outliers
outliers = locs.loc[locs[self._includeColName] == False]
locs = locs.loc[locs[self._includeColName]]
if (curveNumber in self.useTrajectories) or (not self.useTrajectories):
markerColor = 'blue'
else:
markerColor = '#999999' # gray
axx.plot(locs[t],
locs[x] - x0,
'.',
color=markerColor,
alpha=0.5)
axx.plot(outliers[t],
outliers[x] - x0,
'x',
color='#999999',
alpha=0.5)
axx.plot(self.avgSpline[ts],
self.avgSpline[xs],
linewidth=2,
color='orange')
axx.set_xlabel(xHorzLabel)
axx.set_ylabel(xVertLabel)
axx.set_title('{0:s}, Region number: {1:d}'.format(title, curveNumber))
axx.set_ylim((minxy, maxxy))
axy.plot(locs[t],
locs[y] - y0,
'.',
color=markerColor,
alpha=0.5)
axy.plot(outliers[t],
outliers[y] - y0,
'x',
color='#999999',
alpha=0.5)
axy.plot(self.avgSpline[ts],
self.avgSpline[ys],
linewidth=2,
color='orange')
axy.set_xlabel(yHorzLabel)
axy.set_ylabel(yVertLabel)
axy.set_ylim((minyy, maxyy))
plt.show()
@property
def regionLocs(self):
"""DataFrame holding the localizations for individual fiducials.
"""
return self._regionData
@regionLocs.setter
def regionLocs(self, regionData):
"""Checks that the fiducial localizations are formatted correctly.
"""
if regionData is not None:
assert 'region_id' in regionData.index.names, \
'regionLocs DataFrame requires index named "region_id"'
# Sort the multi-index to allow slicing
regionData.sort_index(inplace=True)
self._regionData = regionData
def clearRegionLocs(self):
"""Clears any currently held localization data.
"""
self._regionData = None
@abstractmethod
def computeTrajectory(self):
"""Computes the trajectory.
"""
pass
def _movingAverage(self, series, windowSize=100, sigma=3):
"""Estimate the weights for smoothing splines.
Parameters
----------
series : array of int
Discrete samples from a time series.
windowSize : int
Size of the moving average window in axial slices.
sigma : int
Size of the Gaussian averaging kernel in axial slices.
Returns
-------
average : float
The moving window average.
var : float
The variance of the data within the moving window.
References
----------
http://www.nehalemlabs.net/prototype/blog/2014/04/12/how-to-fix-scipys-interpolating-spline-default-behavior/
"""
b = gaussian(windowSize, sigma)
average = filters.convolve1d(series, b / b.sum())
var = filters.convolve1d(np.power(series - average, 2), b / b.sum())
return average, var
@abstractmethod
def reset(self):
"""Resets the drift computer to its initial value.
"""
pass
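# An illustrative, hedged sketch of the weighting scheme described in
# ComputeTrajectories._movingAverage: a Gaussian window estimates the local
# mean and variance of a trajectory, and 1/sqrt(variance) then weights a
# UnivariateSpline fit. It reuses this module's numpy/scipy imports; the
# window size, kernel width, and synthetic data are arbitrary.
def _weighted_spline_sketch():
    t = np.linspace(0, 10, 500)
    series = np.sin(t) + 0.1 * np.random.randn(t.size)
    b = gaussian(100, 3)  # moving-average window
    average = filters.convolve1d(series, b / b.sum())
    var = filters.convolve1d(np.power(series - average, 2), b / b.sum())
    spline = UnivariateSpline(t, series, w=1 / np.sqrt(var))
    return spline(t)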
class DriftCorrect(metaclass=ABCMeta):
"""Basic functionality for a drift correction processor.
Attributes
----------
correctorType : string
Identifies the type of drift corrector for a specific class.
driftTrajectory : Pandas DataFrame
x,y pairs each possessing a unique frame number.
"""
@abstractproperty
def correctorType(self):
"""Identifies the type of drift corrector for a specific class.
"""
pass
@abstractproperty
def driftTrajectory(self):
"""A list of x,y pairs with each possessing a unique frame number.
"""
pass
@abstractmethod
def correctLocalizations(self):
"""Corrects a DataFrame of localizations for drift.
"""
pass
@abstractmethod
def readSettings(self):
"""Sets the state of the drift corrector.
"""
pass
@abstractmethod
def writeSettings(self):
"""Writes the state of the drift corrector to a file.
"""
pass
class MergeStats(metaclass=ABCMeta):
"""Basic functionality for computing statistics from merged localizations.
"""
@abstractmethod
def computeStatistics(self):
"""Computes the merged molecule statistics.
"""
pass
def _wAvg(self, group, coordinate, photonsCol='photons'):
"""Perform a photon-weighted average over positions.
This helper function computes the average of all numbers in the
'coordinate' column when applied to a Pandas GroupBy object.
Parameters
----------
group : Pandas GroupBy
The merged localizations.
coordinate : str
Column label for the coordinate over which to compute the weighted
average for a particular group.
photonsCol : str
Column label for the photons column.
Returns
-------
wAvg : float
The weighted average over the grouped data in 'coordinate',
weighted by the square root of values in the 'photons' column.
"""
positions = group[coordinate]
photons = group[photonsCol]
wAvg = (positions * photons.apply(np.sqrt)).sum() \
/ photons.apply(np.sqrt).sum()
return wAvg
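# A small demonstration (relying on this module's np/pd imports) of the
# photon-weighted average computed by MergeStats._wAvg: positions are averaged
# with weights equal to the square root of the photon counts. The 'particle'
# grouping column and the numbers are illustrative only.
def _weighted_average_sketch():
    locs = pd.DataFrame({'particle': [0, 0, 1, 1],
                         'x': [10.0, 12.0, 50.0, 54.0],
                         'photons': [100.0, 400.0, 900.0, 100.0]})
    return locs.groupby('particle').apply(
        lambda g: (g['x'] * g['photons'].apply(np.sqrt)).sum()
                  / g['photons'].apply(np.sqrt).sum())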
"""
Utility classes
-------------------------------------------------------------------------------
"""
class SelectLocalizations:
"""Interactively select localizations using rectangular ROI's.
This class is used to display an image containing information about the
local density of localizations within a dataset. From this image, a user
may interactively select regions containing localizations for further
analysis, such as when performing fiducial drift corrections.
"""
def __init__(self):
# Setup the class fields
self._regions = [{'xMin': None, 'xMax': None,
'yMin': None, 'yMax': None}]
def doInteractiveSearch(self, df, gridSize=100, unitConvFactor=1. / 1000,
unitLabel='microns'):
"""Interactively find regions in the histogram images.
Allows the user to select regions and extract localizations.
Parameters
----------
df : Pandas DataFrame
Data to visualize and search for fiducials.
gridSize : float
The size of the hexagonal grid in the 2D histogram.
unitConvFactor : float
Conversion factor for plotting the 2D histogram in different units
than the data. Most commonly used to convert nanometers to microns.
In this case, unitConvFactor = 1/1000 microns per nanometer.
unitLabel : str
Unit label for the histogram. This is only used for labeling the
axes of the 2D histogram; users may change this depending on the
units of their data and unitConvFactor.
"""
# Reset the fiducial regions
self._regions = [{'xMin': None, 'xMax': None,
'yMin': None, 'yMax': None}]
def onClose(event):
"""Run when the figure closes.
"""
fig.canvas.stop_event_loop()
def onSelect(eclick, erelease):
pass
def toggleSelector(event, processor):
"""Handles user input.
"""
if event.key in [' ']:
# Clear fiducial regions list if they are not empty
#(Important for when multiple search regions are selected.)
if not self._regions[0]['xMin']:
# Convert _regions to empty list ready for appending
self._regions = []
xMin, xMax, yMin, yMax = toggleSelector.RS.extents
processor._regions.append({'xMin': xMin / unitConvFactor,
'xMax': xMax / unitConvFactor,
'yMin': yMin / unitConvFactor,
'yMax': yMax / unitConvFactor})
fig, ax = plt.subplots()
fig.canvas.mpl_connect('close_event', onClose)
im = ax.hexbin(df[self._coordCols[0]] * unitConvFactor,
df[self._coordCols[1]] * unitConvFactor,
gridsize=gridSize, cmap=plt.cm.YlOrRd_r)
ax.set_xlabel(r'x-position, ' + unitLabel)
ax.set_ylabel(r'y-position, ' + unitLabel)
ax.invert_yaxis()
cb = plt.colorbar(im)
cb.set_label('Counts')
toggleSelector.RS = RectangleSelector(ax,
onSelect,
drawtype='box',
useblit=True,
button=[1, 3], # l/r only
spancoords='data',
interactive=True)
plt.connect('key_press_event',
lambda event: toggleSelector(event, self))
# Make figure full screen
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
# Suppress the MatplotlibDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.canvas.start_event_loop_default()
def _extractLocsFromRegions(self, df):
"""Reduce the size of the search area for automatic fiducial detection.
Parameters
----------
df : Pandas DataFrame
DataFrame that will be spatially filtered.
Returns
-------
locsInRegions : Pandas DataFrame
DataFrame containing localizations only within the select regions.
"""
# If search regions are not defined, raise an error
if not self._regions[0]['xMin']:
raise ZeroFiducialRegions('Error: Identified no fiducial regions.')
locsInRegions = []
numRegions = len(self._regions)
for regionNumber in range(numRegions):
xMin = self._regions[regionNumber]['xMin']
xMax = self._regions[regionNumber]['xMax']
yMin = self._regions[regionNumber]['yMin']
yMax = self._regions[regionNumber]['yMax']
# Isolate the localizations within the current region
locsInCurrRegion = df[(df[self._coordCols[0]] > xMin) &
(df[self._coordCols[0]] < xMax) &
(df[self._coordCols[1]] > yMin) &
(df[self._coordCols[1]] < yMax)].copy()
# Add a multi-index identifying the region number
locsInCurrRegion['region_id'] = regionNumber
locsInCurrRegion.set_index(['region_id'], append=True,
inplace=True)
locsInRegions.append(locsInCurrRegion)
return pd.concat(locsInRegions)
"""
Concrete classes
-------------------------------------------------------------------------------
"""
class AddColumn:
"""Adds a column to a DataFrame.
AddColumn adds a column to a DataFrame and initializes every row to the
same value.
Parameters
----------
columnName : str
The name of the new column.
defaultValue : mixed datatype
The default value to assign to each row of the new column.
Attributes
----------
columnName : str
The name of the new column.
defaultValue : mixed datatype
The default value to assign to each row of the new column.
"""
def __init__(self, columnName, defaultValue=True):
self.columnName = columnName
self.defaultValue = defaultValue
def __call__(self, df):
"""Add the new column to the DataFrame.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object with a new column.
"""
procdf = df.copy()
del(df)
numRows, _ = procdf.shape
procdf[self.columnName] = pd.Series([self.defaultValue] * numRows,
index=procdf.index)
return procdf
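# A short usage sketch for AddColumn (the toy DataFrame is illustrative only):
# every row of the new column receives the same default value.
def _add_column_sketch():
    proc = AddColumn('keep', defaultValue=True)
    procdf = proc(pd.DataFrame({'x': [1.0, 2.0], 'y': [3.0, 4.0]}))
    return procdf['keep']  # True for both rows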
class CalibrateAstigmatism(SelectLocalizations):
"""Computes calibration curves for astigmatic imaging from bead stacks.
Parameters
----------
interactiveSearch : bool
Determines whether the user will interactively find fiducials.
Setting this to False means that fiducials are found automatically,
although this is not always reliable.
coordCols : list of str
List of strings identifying the x- and y-coordinate column names
in that order.
sigmaCols : list of str
List of strings identifying the column names containing the PSF widths
in x and y.
zCol : str
Name of the column identifying the z-coordinate values.
astigmatismComputer: ComputeTrajectories
Algorithm for computing astigmatic calibration curves.
wobbleComputer: ComputeTrajectories
Algorithm for computing wobble calibration curves.
Attributes
----------
interactiveSearch : bool
Determines whether the user will interactively find fiducials.
Setting this to False means that fiducials are found automatically,
although this is not always reliable.
astigmatismComputer: AstigComputer
Algorithm for computing astigmatic calibration curves.
calibrationCurves : func, func
The calibration curves for astigmatic 3D imaging. The first
element contains the PSF width in x as a function of z and
the second contains the width in y as a function of z.
wobbleCurves : func, func
The wobble curves for astigmatic 3D imaging. These map the PSF centroid
positions as a function of z. See Ref. 1 for more information.
References
----------
1. Carlini, et al., "Correction of a Depth-Dependent Lateral Distortion in
3D Super-Resolution Imaging," PLoS One 10(11):e0142949 (2015).
"""
def __init__(self, interactiveSearch=True, coordCols=['x', 'y'],
sigmaCols=['sigma_x', 'sigma_y'], zCol='z', startz=None,
stopz=None, astigmatismComputer=None, wobbleComputer=None):
self.interactiveSearch = interactiveSearch
self.calibrationCurves = None
self.wobbleCurves = None
self._coordCols = coordCols
self._sigmaCols = sigmaCols
self._zCol = zCol
if astigmatismComputer:
self.astigmatismComputer = astigmatismComputer
else:
self.astigmatismComputer = DefaultAstigmatismComputer(
coordCols=coordCols,
sigmaCols=sigmaCols, zCol=zCol)
if wobbleComputer:
self.wobbleComputer = wobbleComputer
else:
self.wobbleComputer = DefaultAstigmatismComputer(
coordCols=coordCols, sigmaCols=coordCols,
zCol=zCol, zeroz=0)
def __call__(self, df):
"""Computes the astigmatic calibration curves from user-selected beads.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
df : DataFrame
The same Pandas DataFrame object is returned because the original
localizations are not modified.
"""
# Update the wobble computer to match the same fitting range as the
# astigmatism computer. This prevents problems with the display of bead
# fits where the points not included in the astigmatism curve fits
# reflected the wobble computer settings.
print(('Setting wobble fitting range to match the astigmatism fit '
'range. startz and stopz are set in the astigmatism computer.'))
self.wobbleComputer.startz = self.astigmatismComputer.startz
self.wobbleComputer.stopz = self.astigmatismComputer.stopz
if self.interactiveSearch:
self.doInteractiveSearch(df)
try:
locs = self._extractLocsFromRegions(df)
except ZeroFiducialRegions:
print('No regions containing localizations identified. '
'Returning original DataFrame.')
self.astigmatismComputer.clearRegionLocs()
self.wobbleComputer.clearRegionLocs()
return df
else:
locs = self.astigmatismComputer.regionLocs
# This returns the average splines, but we don't need them.
_ = self.astigmatismComputer.computeTrajectory(locs)
_ = self.wobbleComputer.computeTrajectory(locs)
self.calibrationCurves = self._computeCalibrationCurves(
self.astigmatismComputer.avgSpline)
self.wobbleCurves = self._computeCalibrationCurves(
self.wobbleComputer.avgSpline)
return df
def _computeCalibrationCurves(self, avgSpline):
"""Computes the 3D astigmatic calibration curve from average splines.
Parameters
----------
avgSpline : Pandas DataFrame
The averaged spline fits to bead data.
Returns
-------
fx : func
The calibration curve that returns the the width in x as
a function of z.
fy : func
The calibration curve that returns the the width in y as
a function of z.
"""
xS, yS = avgSpline['xS'], avgSpline['yS']
zPos = avgSpline['z']
fx = interp1d(zPos, xS, kind='cubic', bounds_error=False,
fill_value=np.NaN, assume_sorted=False)
fy = interp1d(zPos, yS, kind='cubic', bounds_error=False,
fill_value=np.NaN, assume_sorted=False)
return fx, fy
class CleanUp:
"""Performs regular clean up routines on imported data.
The cleanup processor encapsulates a few common steps that are performed on
imported datasets. Currently, these steps are:
1) Convert rows containing strings to numeric data types
2) Drop rows containing strings that cannot be parsed to numeric types
3) Drop rows with Inf's and NaN's
"""
def __call__(self, df):
"""Clean up the data.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object with the same information but new column names.
"""
procdf = df.copy()
del(df)
for column in procdf:
# 'coerce' means anything unable to be parsed becomes a NaN
procdf[column] = pd.to_numeric(procdf[column], errors='coerce')
procdf.replace([np.inf, -np.inf], np.nan, inplace=True)
procdf.dropna(inplace=True)
# DO NOT USE procdf.reindex() because it will not
# automatically reorder an index correctly. It is
# used for other purposes.
procdf.index = np.arange(procdf.shape[0])
return procdf
class Cluster:
"""Clusters the localizations into spatial clusters.
Parameters
----------
minSamples : int
Minimum number of samples within one neighborhood radius.
eps : float
The neighborhood radius defining a cluster.
coordCols : list of str
The columns of the data to be clustered in the format ['x', 'y'].
"""
def __init__(self, minSamples=50, eps=20, coordCols=['x', 'y']):
self._minSamples = minSamples
self._eps = eps
self._coordCols = coordCols
def __call__(self, df):
"""Group the localizations into spatial clusters.
When this class is called, it performs density-based spatial clustering
on the positional coordinates of each localization. Cluster labels are
added as an additional column to the DataFrame.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object with containing a new column indicating the
cluster ID.
"""
columnsToCluster = self._coordCols
# Setup and perform the clustering
db = DBSCAN(min_samples=self._minSamples, eps=self._eps)
db.fit(df[columnsToCluster])
# Get the cluster labels and make it a Pandas Series
clusterLabels = pd.Series(db.labels_, name='cluster_id')
# Append the labels to the DataFrame
procdf = pd.concat([df, clusterLabels], axis=1)
return procdf
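# A hedged usage sketch for Cluster: two well-separated synthetic groups of
# localizations are labelled by DBSCAN and the labels appear in a new
# 'cluster_id' column. The coordinates, minSamples, and eps values are
# illustrative only.
def _cluster_sketch():
    rng = np.random.RandomState(0)
    locs = pd.DataFrame({
        'x': np.concatenate([rng.normal(0, 5, 60), rng.normal(500, 5, 60)]),
        'y': np.concatenate([rng.normal(0, 5, 60), rng.normal(500, 5, 60)])})
    clusterer = Cluster(minSamples=10, eps=20, coordCols=['x', 'y'])
    return clusterer(locs)['cluster_id'].unique()  # typically array([0, 1])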
class ComputeClusterStats:
"""Computes statistics for clusters of localizations.
Parameters
----------
idLabel : str
The column name containing cluster ID's.
coordCols : list of string
A list containing the column names containing the transverse
localization coordinates.
zCoord : str
The column name of the axial coordinate.
statsFunctions : dict of name/function pairs
A dictionary containing column names and functions for computing
custom statistics from the clustered localizations. The keys in
the dictionary determine the name of the customized column and the
value contains a function that computes a number from the
coordinates of the localizations in each cluster.
"""
# The name to append to the center coordinate column names
centerName = '_center'
def __init__(self, idLabel='cluster_id',
coordCols=['x', 'y'],
zCoord='z',
statsFunctions=None):
self._idLabel = idLabel
self._statsFunctions = {'radius_of_gyration': self._radiusOfGyration,
'eccentricity': self._eccentricity,
'convex_hull': self._convexHull}
self.coordCols = coordCols
self.zCoord = zCoord
# Add the input functions to the defaults if they were supplied
if statsFunctions:
for name, func in statsFunctions.items():
self._statsFunctions[name] = func
def __call__(self, df):
"""Compute the statistics for each cluster of localizations.
This function takes a DataFrame, groups the data by the column idLabel,
then computes the cluster statistics for each cluster. A new DataFrame
for the statistics are returned.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object containing cluster statistics.
"""
# Group localizations by their ID
groups = df.groupby(self._idLabel)
# Computes the default statistics for each cluster
tempResultsCoM = groups[self.coordCols].agg(np.mean)
tempResultsLength = pd.Series(groups.size())
# Compute the custom statistics for each cluster and set
# the column name to the dictionary key
tempResultsCustom = []
for name, func in self._statsFunctions.items():
temp = groups.apply(func, self.coordCols, self.zCoord)
temp.name = name # The name of the column is now the dictionary key
tempResultsCustom.append(temp)
# Appends '_center' to the names of the coordinate columns
# and renames the series
newCoordCols = [col + self.centerName for col in self.coordCols]
nameMapping = dict(zip(self.coordCols, newCoordCols))
tempResultsCoM.rename(columns=nameMapping,
inplace=True)
tempResultsLength.name = 'number_of_localizations'
# Create the merged DataFrame
dataToJoin = [tempResultsCoM,
tempResultsLength]
dataToJoin = dataToJoin + tempResultsCustom
procdf = pd.concat(dataToJoin, axis=1)
# Convert the cluster_id index to a column
procdf.reset_index(level=['cluster_id'], inplace=True)
return procdf
def _radiusOfGyration(self, group, coordinates, zCoordinate):
"""Computes the radius of gyration of a grouped cluster.
Parameters
----------
group : Pandas GroupBy
The clustered localizations.
coordinates : list of str
The columns to use for performing the computation;
typically these containing the localization coordinates.
zCoordinate : str
The column title of the axial coordinate.
Returns
-------
Rg : float
The radius of gyration of the group of localizations.
Notes
-----
This currently only uses the transverse coordinates and
ignores the z-coordinate.
"""
variances = group[coordinates].var(ddof=0)
Rg = np.sqrt(variances.sum())
return Rg
def _eccentricity(self, group, coordinates, zCoordinate):
""" Computes the eccentricity of a grouped cluster.
Parameters
----------
group : Pandas GroupBy
The clustered localizations.
coordinates : list of str
The columns to use for performing the computation; typically these
containing the localization coordinates.
zCoordinate : str
The column title of the axial coordinate.
Returns
-------
ecc : float
The eccentricity of the group of localizations.
Notes
-----
This currently only uses the transverse coordinates and
ignores the z-coordinate.
"""
try:
# Compute the covariance matrix and its eigenvalues
Mcov = np.cov(group[coordinates].as_matrix(),
rowvar=0,
bias=1)
eigs = np.linalg.eigvals(Mcov)
ecc = np.max(eigs) / min(eigs)
except:
print('Warning: Error occurred during eccentricity computation. '
'Returning NaN instead.')
ecc = np.nan
return ecc
def _convexHull(self, group, coordinates, zCoordinate):
"""Computes the volume of the cluster's complex hull.
Parameters
----------
group : Pandas GroupBy
The clustered localizations.
coordinates : list of str
The columns to use for performing the computation; typically these
containing the localization coordinates.
zCoordinate : str
The column title of the axial coordinate.
Returns
-------
volume : float or np.nan
Notes
-----
This currently only uses the transverse coordinates and
ignores the z-coordinate.
"""
# Compute CHull only if pyhull is installed
# pyhull is only available in Linux
try:
from pyhull import qconvex
except ImportError:
print(('Warning: pyhull is not installed. '
'Cannot compute convex hull. Returning NaN instead.'))
return np.nan
# Find output volume
try:
points = group[coordinates].as_matrix()
output = qconvex('FA', points)
volume = [vol for vol in output if 'Total volume:' in vol][0]
volume = float(re.findall(r'[-+]?[0-9]*\.?[0-9]+', volume)[0])
except:
print(('Warning: Error occurred during convex hull computation. '
'Returning NaN instead.'))
volume = np.nan
return volume
class ComputeZPosition:
"""Computes the localizations' z-positions from calibration curves.
Parameters
----------
zFunc : func
Function(s) mapping the PSF widths onto Z. Supply this
argument as a tuple in the order (fx, fy).
zCol : str
The name to assign to the new column of z-positions.
coordCols : list of str
The x- and y-coordinate column names, in that order. This is only used
for the wobble correction if wobbleFunc is not None.
sigmaCols : list of str
The column names containing the PSF widths in the x- and y-directions,
respectively.
fittype : str
String indicating the type of fit to use when deriving the z-positions.
Can be either 'huang', which minimizes a distance-like objective
function, or 'diff', which interpolates a curve based on the difference
between PSF widths in x and y.
scalingFactor : float
A scaling factor that multiples the computed z-values to account for
a refractive index mismatch at the coverslip. See [1] for more details.
This can safely be left at one and the computed z-values rescaled later
if you are uncertain about the value to use.
wobbleFunc : func
Function(s) mapping the PSF centroids onto Z. Supply this
argument as a tuple in the order (fx, fy). See [2] for more details.
References
----------
1. Huang, et al., Science 319, 810-813 (2008)
2. Carlini, et al., PLoS One 10(11):e0142949 (2015).
"""
def __init__(self, zFunc, zCol='z', coordCols=['x', 'y'],
sigmaCols=['sigma_x, sigma_y'],
fittype='diff', scalingFactor=1, wobbleFunc = None):
self.zFunc = zFunc
self.zCol = zCol
self.coordCols = coordCols
self.sigmaCols = sigmaCols
self.fittype = fittype
self.scalingFactor = scalingFactor
self.wobbleFunc = wobbleFunc
# This is the calibration curve computed when fittype='diff' and is
# used internally for error checking and testing.
self._f = None
def __call__(self, df):
""" Applies zFunc to the localizations to produce the z-positions.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object with the same information but new column names.
"""
x, y = self.sigmaCols
fx, fy = self.zFunc
if self.fittype == 'diff':
procdf = self._diff(df, x, y, fx, fy)
elif self.fittype == 'huang':
procdf = self._huang(df, x, y, fx, fy)
procdf[self.zCol] *= self.scalingFactor
if self.wobbleFunc:
procdf = self._wobble(procdf)
return procdf
def _diff(self, df, x, y, fx, fy):
"""Determines the z-position from the difference in x- and y-widths.
In this approach, the two calibration curves are sampled, subtracted
from one another, and then reinterpolated to produce a function
representing the axial position as a function of the difference between
the PSF widths in x and y.
In general, it is much faster than the optimization routine used in
Huang et al., Science 2008.
"""
df = df.copy() # Prevents overwriting input DataFrame
# Get minimum and maximum z-positions contained in calibration curves.
# This is required to define the bounds on the sampling domain.
zMin, zMax = np.min([fx.x, fy.x]), np.max([fx.x, fy.x])
zSamples = np.linspace(zMin, zMax, num=150)
# Create the function representing the difference between calibration
# curves.
def dW(fx, fy):
return lambda z: fx(z) - fy(z)
f = interp1d(dW(fx, fy)(zSamples), zSamples, bounds_error=False,
fill_value=np.NaN, assume_sorted=False)
self._f = f
# Compute the z-positions from this interpolated curve
locWidths = df[x] - df[y]
z = f(locWidths)
df[self.zCol] = z
return df
def _huang(self, df, x, y, fx, fy):
"""Determines the z-position by objective minimization.
This routine can be very slow, especially for large datasets. It is
recommended to try it on a small dataset first.
"""
df = df.copy() # Prevents overwriting input DataFrame
# Create the objective function for the distance between the data and
# calibration curves.
def D(z, wx, wy):
return np.sqrt((wx**0.5 - fx(z)**0.5)**2 + \
(wy**0.5 - fy(z)**0.5)**2)
# Create the objective function to minimize
def fmin(wx, wy):
res = minimize(lambda z: D(z, wx, wy), [0], bounds=[(-600,600)])
return res.x[0]
df[self.zCol] = df.apply(lambda row: fmin(row[x], row[y]), axis=1)
return df
def _wobble(self, df):
"""Corrects localizations for wobble.
This function takes a DataFrame of localizations whose z-positions were
already computed and determines the x- and y-corrections necessary to
correct for an axial dependence of the centriod position. It then
applies these corrections and returns the processed DataFrame.
Parameters
----------
df : DataFrame
A Pandas DataFrame containing the localizations.
Returns
-------
df : DataFrame
The wobble-corrected DataFrame.
"""
df = df.copy() # Prevents overwriting input DataFrame
x, y = self.coordCols
zLocs = df[self.zCol]
fx, fy = self.wobbleFunc
xc = fx(zLocs)
yc = fy(zLocs)
df['dx'] = xc
df['dy'] = yc
df[x] = df[x] - xc
df[y] = df[y] - yc
return df
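# A standalone sketch of the 'diff' strategy implemented in
# ComputeZPosition._diff: the difference between the x- and y-width calibration
# curves is inverted with interp1d (imported above) so that a measured width
# difference maps back onto an axial position. The quadratic calibration curves
# below are purely illustrative, not real calibration data.
def _z_from_width_difference_sketch():
    z = np.linspace(-400, 400, 200)
    wx = 200 + 0.002 * (z - 150) ** 2  # assumed width-vs-z curves
    wy = 200 + 0.002 * (z + 150) ** 2
    f = interp1d(wx - wy, z, bounds_error=False, fill_value=np.NaN,
                 assume_sorted=False)
    return f(np.array([-120.0, 0.0, 120.0]))  # recovered z positions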
class ConvertHeader:
"""Converts the column names in a localization file to a different format.
Parameters
----------
mapping : FormatMap
A two-way dictionary for converting from column name to another.
Attributes
----------
mapping : FormatMap
A two-way dictionary for converting from column name to another.
"""
def __init__(self, mapping=FormatMap(config.__Format_Default__)):
"""Determines whether the file is a single file or a directory tree.
Parameters
----------
mapping : FormatMap
A dict-like object for converting between column names in
different data formats.
"""
self.mapping = mapping
def __call__(self, df):
"""Convert the files to the new header format.
When this class is called, it maps the column names from the input
format to the output format. Formats are defined independently of this
class.
Parameters
----------
df : DataFrame
A Pandas DataFrame object.
Returns
-------
procdf : DataFrame
A DataFrame object with the same information but new column names.
"""
procdf = df
# Change the column names
colNames = [self.mapping[oldName] for oldName in df.columns]
procdf.columns = colNames
return procdf
class DefaultAstigmatismComputer(ComputeTrajectories):
"""Default algorithm for computing astigmatic calibration curves.
Parameters
----------
coordCols : list of str
List of strings identifying the x- and y-coordinate column names
in that order.
sigmaCols : list of str
List of strings identifying the column names containing the PSF widths
in x and y.
zCol : str
Name of the column identifying the z-coordinate values.
smoothingWindowSize : float
Moving average window size in slices for spline fitting.
smoothingFilterSize : float
Moving average Gaussian kernel width in slices for spline fitting.
useTrajectories : list of int or empty list
List of integers corresponding to the fiducial trajectories to use
when computing the average trajectory. If [], all trajectories
are used.
startz : float
The start point of the z-fitting range.
stopz : float
The end point of the z-fitting range.
zeroz : None or float
The z-position corresponding to the focal plane. This is used only for
the calculation of the wobble curves and NOT the astigmatism curves.
Set to None if this computer is intended to compute astigmatism curves;
set to a number to compute wobble curves.
"""
def __init__(self, coordCols=['x','y'], sigmaCols=['sigma_x', 'sigma_y'],
zCol='z', smoothingWindowSize=20, smoothingFilterSize=3,
useTrajectories=[], startz=None, stopz=None, zeroz = None):
self.coordCols = coordCols
self.sigmaCols = sigmaCols
self.zCol = zCol
self.smoothingWindowSize = smoothingWindowSize
self.smoothingFilterSize = smoothingFilterSize
self.useTrajectories = useTrajectories
self.startz = startz
self.stopz = stopz
self.zeroz = zeroz
super(DefaultAstigmatismComputer, self).__init__()
# Column of DataFrame used to indicate what localizations are not
# included in a trajectory for a spline fit, e.g. outliers
self._includeColName = 'included_in_trajectory'
# initial state
self._init_coordCols = coordCols.copy()
self._init_sigmaCols = sigmaCols.copy()
self._init_zCol = zCol
self._init_smoothingWindowSize = smoothingWindowSize
self._init_smoothingFilterSize = smoothingFilterSize
self._init_useTrajectories = useTrajectories.copy()
self._init_startz = startz
self._init_stopz = stopz
self._init_zeroz = zeroz
def combineCurves(self, startz, stopz):
"""Average the splines from different fiducials together.
Parameters
----------
startz : float
Minimum frame number in full dataset
stopz : float
Maximum frame number in full dataset
"""
zPos = np.linspace(startz, stopz, num=100)
numSplines = len(self.splines)
# Evaluate each x and y spline at every frame position
fullRangeSplines = {'xS': np.array([self.splines[i]['xS'](zPos)
for i in range(numSplines)]),
'yS': np.array([self.splines[i]['yS'](zPos)
for i in range(numSplines)])}
# Create the mask area if only certain fiducials are to be averaged
if not self.useTrajectories:
mask = np.arange(numSplines)
else:
mask = self.useTrajectories
# Compute the average over spline values
avgSpline = {'xS': [], 'yS': []}
try:
for key in avgSpline.keys():
avgSpline[key] = np.mean(fullRangeSplines[key][mask], axis=0)
except IndexError:
raise UseTrajectoryError(
'At least one of the indexes inside '
'useTrajectories does not match a known fiducial '
'index. The maximum fiducial index is {0:d}.'
''.format(
numSplines - 1))
# Append z positions to avgSpline
avgSpline['z'] = zPos
self.avgSpline = pd.DataFrame(avgSpline)
def computeTrajectory(self, locs):
"""Computes the final drift trajectory from fiducial localizations.
Parameters
----------
locs : Pandas DataFrame
DataFrame containing the localizations belonging to beads.
Returns
-------
avgSpline : Pandas DataFrame
A dataframe containing z-positions and PSF widths in x- and y- for
calibrating an astigmatic imaging measurement.
"""
z = self.zCol
if self.startz:
startz = self.startz
else:
startz = locs[z].min()
if self.stopz:
stopz = self.stopz
else:
stopz = locs[z].max()
self.clearRegionLocs()
self.regionLocs = locs
self._removeOutliers(startz, stopz)
self.fitCurves()
self.combineCurves(startz, stopz)
return self.avgSpline
def _computeOffsets(self, locs):
"""Compute the offsets for bead trajectories to align curves at z=0.
Parameters
----------
locs : Pandas DataFrame
Localizations from a single bead region.
Returns
-------
x0, y0 : tuple of int
The offsets to subtract from the localizations belonging to a
bead.
"""
avgOffset = 10
x, y = self.coordCols[0], self.coordCols[1]
startFrame, stopFrame = locs[self.zCol].min(), \
locs[self.zCol].max()
# Convert None's to infinity for comparison
if self.startz == None:
startz = -np.inf
else:
startz = self.startz
if self.stopz == None:
stopz = np.inf
else:
stopz = self.stopz
if self.zeroz > stopz or self.zeroz < startz:
warnings.warn(('Warning: zeroz ({0:d}) is outside the '
'allowable range of frame numbers in this dataset '
'({1:d} - {2:d}). Try a different zeroz value.'
''.format(self.zeroz, startFrame + avgOffset,
stopFrame - avgOffset)))
# Average the localizations around the zeroz value
x0 = locs[(locs[self.zCol] > self.zeroz - avgOffset)
& (locs[self.zCol] < self.zeroz + avgOffset)][x].mean()
y0 = locs[(locs[self.zCol] > self.zeroz - avgOffset)
& (locs[self.zCol] < self.zeroz + avgOffset)][y].mean()
if (x0 is np.nan) or (y0 is np.nan):
warnings.warn('Could not determine an offset value; '
'setting offsets to zero.')
x0, y0 = 0, 0
return x0, y0
def fitCurves(self):
"""Fits individual splines to each z-scan.
"""
print('Performing spline fits...')
# Check whether trajectories already exist
if self.regionLocs is None:
raise ZeroRegions('Zero regions containing beads are currently '
'saved with this processor.')
self.splines = []
regionIDIndex = self.regionLocs.index.names.index('region_id')
x = self.sigmaCols[0]
y = self.sigmaCols[1]
z = self.zCol
# rid is an integer
for rid in self.regionLocs.index.levels[regionIDIndex]:
# Get localizations from inside the current region matching rid
# and that passed the _removeOutliers() step
currRegionLocs = self.regionLocs.xs(
rid, level='region_id', drop_level=False)
# Use only those fiducials within a certain radius of the
# cluster of localization's center of mass
currRegionLocs = currRegionLocs.loc[
currRegionLocs[self._includeColName]]
windowSize = self.smoothingWindowSize
sigma = self.smoothingFilterSize
# Shift the localization(s) at zeroz to (x = 0, y = 0) by
# subtracting its value at frame number zeroFrame
if self.zeroz is not None:
x0, y0 = self._computeOffsets(currRegionLocs)
else:
x0, y0 = 0, 0
# Determine the appropriate weighting factors
_, varx = self._movingAverage(currRegionLocs[x] - x0,
windowSize=windowSize,
sigma=sigma)
_, vary = self._movingAverage(currRegionLocs[y] - y0,
windowSize=windowSize,
sigma=sigma)
# Perform spline fits. Extrapolate using boundary values (const)
extrapMethod = 'extrapolate'
xSpline = UnivariateSpline(currRegionLocs[z].as_matrix(),
currRegionLocs[x].as_matrix() - x0,
w=1 / np.sqrt(varx),
ext=extrapMethod)
ySpline = UnivariateSpline(currRegionLocs[z].as_matrix(),
currRegionLocs[y].as_matrix() - y0,
w=1 / np.sqrt(vary),
ext=extrapMethod)
# Append results to class field splines
self.splines.append({'xS': xSpline,
'yS': ySpline})
def plotBeads(self, curveNumber=None):
"""Make a plot of each bead's z-stack and the average spline fit.
plotBeads allows the user to check the individual beads and
their fits against the average spline fit.
Parameters
----------
curveNumber : int
Index of the spline to plot. (0-index)
"""
coordCols = [
self.zCol,
self.sigmaCols[0],
self.sigmaCols[1]
]
horizontalLabels = ['', 'z-position']
verticalLabels = ['x', 'y']
title = 'Avg. spline and bead'
splineCols = ['z', 'xS', 'yS']
# Set the y-axis based on the average spline
minxy, maxxy = self.avgSpline['xS'].min(), self.avgSpline['xS'].max()
minyy, maxyy = self.avgSpline['yS'].min(), self.avgSpline['yS'].max()
minxy -= 50
maxxy += 50
minyy -= 50
maxyy += 50
ylims = [minxy, maxxy, minyy, maxyy]
if curveNumber is None:
# Plot all trajectories and splines
startIndex = 0
stopIndex = len(self.splines)
else:
# Plot only the input trajectory and spline
startIndex = curveNumber
stopIndex = curveNumber + 1
offsets=[0,0] # No offsets for these plots
for fid in range(startIndex, stopIndex):
locs = self.regionLocs.xs(fid, level='region_id', drop_level=False)
locs = locs.loc[locs[self._includeColName]]
if self.zeroz is not None:
offsets = self._computeOffsets(locs)
else:
offsets = (0, 0)
self._plotCurves(fid, coordCols, horizontalLabels,
verticalLabels, title, splineCols,
offsets, ylims)
def _removeOutliers(self, startz, stopz):
"""
Removes localizations lying outside the z-fitting range.
Parameters
----------
startz : float
    The lower bound of the z-fitting range; localizations with smaller z are excluded.
stopz : float
    The upper bound of the z-fitting range; localizations with larger z are excluded.
"""
z = self.zCol
self.regionLocs[self._includeColName] = True
self.regionLocs.loc[
(self.regionLocs[z] < startz) | (self.regionLocs[z] > stopz),
self._includeColName
] = False
def reset(self):
"""Resets the astigmatism computer to its initial state.
"""
self.coordCols = self._init_coordCols.copy()
self.sigmaCols = self._init_sigmaCols.copy()
self.zCol = self._init_zCol
self.smoothingWindowSize = self._init_smoothingWindowSize
self.smoothingFilterSize = self._init_smoothingFilterSize
self.useTrajectories = self._init_useTrajectories.copy()
self.startz = self._init_startz
self.stopz = self._init_stopz
self.zeroz = self._init_zeroz
class DefaultDriftComputer(ComputeTrajectories):
"""The default algorithm for computing a drift trajectory.
The default drift computer fits a cubic smoothing spline to
localizations from fiducial regions and averages the splines from multiple
fiducials. It allows users to set the frame where the trajectories are
equal to zero in x and y, to adjust the smoothing window parameters, and
to select what trajectories are used to compute the final trajectory that
is stored inside the avgSpline attribute.
Parameters
----------
coordCols : list str
List of strings identifying the x- and y-coordinate column names
in that order.
frameCol : str
Name of the column identifying the column containing the frames.
maxRadius : float
The maximum distance that a localization may lie from the center of
a cluster of fiducial localizations; localizations farther than this
distance are not included in the fit. Set to None to include all
fiducials. Units are the same as in coordCols.
smoothingWindowSize : float
Moving average window size in frames for spline fitting.
smoothingFilterSize : float
Moving average Gaussian kernel width in frames for spline fitting.
useTrajectories : list of int
List of integers corresponding to the fiducial trajectories to use
when computing the average trajectory. If empty, all trajectories
are used.
zeroFrame : int
Frame where all individual drift trajectories are equal to zero.
This may be adjusted to help correct fiducial trajectories that
don't overlap well near the beginning.
Attributes
----------
avgSpline : Pandas DataFrame
DataFrame with 'frame' index column and 'xS' and 'yS' position
coordinate columns representing the drift of the sample during the
acquisition.
coordCols : list str
List of strings identifying the x- and y-coordinate column names
in that order.
regionLocs : Pandas DataFrame
DataFrame with a 'region_id' column denoting localizations from
different regions of the original dataset. This is created by the
parent ComputeTrajectories class.
frameCol : str
Name of the column identifying the column containing the frames.
maxRadius : float
The maximum distance that a localization may lie from the center of
a cluster of fiducial localizations; localizations farther than this
distance are not included in the fit. Set to None to include all
fiducials. Units are the same as in coordCols.
smoothingWindowSize : float
Moving average window size in frames for spline fitting.
smoothingFilterSize : float
Moving average Gaussian kernel width in frames for spline fitting.
splines : list of dict of 2x UnivariateSpline, 2x int
Individual splines fit to the fiducial trajectories. Key names are
'xS', 'yS', 'minFrame', and 'maxFrame'.
useTrajectories : list of int or empty list
List of integers corresponding to the fiducial trajectories to use
when computing the average trajectory. If [], all trajectories
are used.
zeroFrame : int
Frame where all individual drift trajectories are equal to zero.
This may be adjusted to help correct fiducial trajectories that
don't overlap well near the beginning.
"""
def __init__(self, coordCols=['x', 'y'], frameCol='frame',
maxRadius=None, smoothingWindowSize=600,
smoothingFilterSize=400, useTrajectories=[],
zeroFrame=1000):
self.coordCols = coordCols
self.frameCol = frameCol
self.maxRadius = maxRadius
self.smoothingWindowSize = smoothingWindowSize
self.smoothingFilterSize = smoothingFilterSize
self.useTrajectories = useTrajectories
self.zeroFrame = zeroFrame
super(ComputeTrajectories, self).__init__()
# Column of DataFrame used to indicate what localizations are not
# included in a trajectory for a spline fit, e.g. outliers
self._includeColName = 'included_in_trajectory'
# initial state
self._init_coordCols = coordCols.copy()
self._init_frameCol = frameCol
self._init_maxRadius = maxRadius
self._init_smoothingWindowSize = smoothingWindowSize
self._init_smoothingFilterSize = smoothingFilterSize
self._init_useTrajectories = useTrajectories.copy()
self._init_zeroFrame = zeroFrame
def combineCurves(self, startFrame, stopFrame):
"""Average the splines from different fiducials together.
        combineCurves() relies on the assumption that fiducial
        trajectories span a significant portion of the full number of frames in
        the acquisition. Under this assumption, it uses the splines found in
        fitCurves() to extrapolate values outside of their tracks using the
        boundary value. It then evaluates the splines at every frame between
        startFrame and stopFrame and averages them across the selected
        fiducials.
Parameters
----------
startFrame : int
Minimum frame number in full dataset
stopFrame : int
Maximum frame number in full dataset
"""
# Build list of evaluated splines between the absolute max and
# min frames.
frames = np.arange(startFrame, stopFrame + 1, 1)
numSplines = len(self.splines)
        # Evaluate each x and y spline at every frame position
fullRangeSplines = {'xS': np.array([self.splines[i]['xS'](frames)
for i in range(numSplines)]),
'yS': np.array([self.splines[i]['yS'](frames)
for i in range(numSplines)])}
        # Create the mask array if only certain fiducials are to be averaged
if not self.useTrajectories:
mask = np.arange(numSplines)
else:
mask = self.useTrajectories
# Compute the average over spline values
avgSpline = {'xS': [], 'yS': []}
try:
for key in avgSpline.keys():
avgSpline[key] = np.mean(fullRangeSplines[key][mask], axis=0)
except IndexError:
raise UseTrajectoryError(
'At least one of the indexes inside '
'useTrajectories does not match a known fiducial '
'index. The maximum fiducial index is {0:d}.'
''.format(
numSplines - 1))
# Append frames to avgSpline
avgSpline['frame'] = frames
self.avgSpline = pd.DataFrame(avgSpline)
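        # Note (illustrative, added): with e.g. three fitted splines and
        # useTrajectories = [0, 2], the mask above selects the first and third
        # rows of each (numSplines, numFrames) array, and np.mean(..., axis=0)
        # averages the selected splines frame by frame.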
def computeTrajectory(self, regionLocs, startFrame, stopFrame):
"""Computes the final drift trajectory from fiducial localizations.
Parameters
----------
regionLocs : Pandas DataFrame
DataFrame containing the localizations belonging to fiducials.
startFrame : int
The minimum frame number in the full dataset.
stopFrame : int
The maximum frame number in the full dataset.
Returns
-------
self.avgSpline : Pandas DataFrame
DataFrame with 'frame' index column and 'xS' and 'yS' position
coordinate columns representing the drift of the sample during the
acquisition.
Notes
-----
computeTrajectory() requires the start and stop frames
because the fiducial localizations may not span the full range
of frames in the dataset.
"""
self.clearRegionLocs()
self.regionLocs = regionLocs
self._removeOutliers()
self.fitCurves()
self.combineCurves(startFrame, stopFrame)
return self.avgSpline
def _computeOffsets(self, locs):
"""Compute the offsets for fiducial trajectories based on zeroFrame.
Parameters
----------
locs : Pandas DataFrame
Localizations from a single fiducial region.
Returns
-------
x0, y0 : tuple of int
The offsets to subtract from the localizations belonging to a
fiducial.
"""
avgOffset = 50
x, y = self.coordCols[0], self.coordCols[1]
startFrame, stopFrame = locs[self.frameCol].min(), \
locs[self.frameCol].max()
if self.zeroFrame > stopFrame or self.zeroFrame < startFrame:
warnings.warn(('Warning: zeroFrame ({0:d}) is outside the '
'allowable range of frame numbers in this dataset '
                           '({1:d} - {2:d}). Try a different zeroFrame value '
                           'by adjusting driftComputer.zeroFrame.'
''.format(self.zeroFrame, startFrame + avgOffset,
stopFrame - avgOffset)))
# Average the localizations around the zeroFrame value
x0 = locs[(locs[self.frameCol] > self.zeroFrame - avgOffset)
& (locs[self.frameCol] < self.zeroFrame + avgOffset)][x].mean()
y0 = locs[(locs[self.frameCol] > self.zeroFrame - avgOffset)
& (locs[self.frameCol] < self.zeroFrame + avgOffset)][y].mean()
        if np.isnan(x0) or np.isnan(y0):
warnings.warn('Could not determine an offset value; '
'setting offsets to zero.')
x0, y0 = 0, 0
return x0, y0
def fitCurves(self):
"""Fits individual splines to each fiducial.
"""
print('Performing spline fits...')
# Check whether fiducial trajectories already exist
if self.regionLocs is None:
raise ZeroRegions('Zero fiducials are currently saved '
'with this processor.')
self.splines = []
regionIDIndex = self.regionLocs.index.names.index('region_id')
x = self.coordCols[0]
y = self.coordCols[1]
frameID = self.frameCol
# fid is an integer
for fid in self.regionLocs.index.levels[regionIDIndex]:
# Get localizations from inside the current region matching fid
# and that passed the _removeOutliers() step
currRegionLocs = self.regionLocs.xs(
fid, level='region_id', drop_level=False)
# Use only those fiducials within a certain radius of the
            # cluster of localizations' center of mass
currRegionLocs = currRegionLocs.loc[
currRegionLocs[self._includeColName]]
maxFrame = currRegionLocs[frameID].max()
minFrame = currRegionLocs[frameID].min()
windowSize = self.smoothingWindowSize
sigma = self.smoothingFilterSize
# Shift the localization(s) at zeroFrame to (x = 0, y = 0) by
# subtracting its value at frame number zeroFrame
x0, y0 = self._computeOffsets(currRegionLocs)
# Determine the appropriate weighting factors
_, varx = self._movingAverage(currRegionLocs[x] - x0,
windowSize=windowSize,
sigma=sigma)
_, vary = self._movingAverage(currRegionLocs[y] - y0,
windowSize=windowSize,
sigma=sigma)
# Perform spline fits. Extrapolate using boundary values (const)
extrapMethod = 'const'
            xSpline = UnivariateSpline(currRegionLocs[frameID].values,
                                       currRegionLocs[x].values - x0,
                                       w=1 / np.sqrt(varx),
                                       ext=extrapMethod)
            ySpline = UnivariateSpline(currRegionLocs[frameID].values,
                                       currRegionLocs[y].values - y0,
                                       w=1 / np.sqrt(vary),
                                       ext=extrapMethod)
# Append results to class field splines
self.splines.append({'xS': xSpline,
'yS': ySpline,
'minFrame': minFrame,
'maxFrame': maxFrame})
def plotFiducials(self, curveNumber=None):
"""Make a plot of each fiducial track and the average spline fit.
plotFiducials allows the user to check the individual fiducial tracks
against the average spline fit.
Parameters
----------
curveNumber : int
Index of the spline to plot. (0-index)
"""
coordCols = [
self.frameCol,
self.coordCols[0],
self.coordCols[1]
]
horizontalLabels = ['', 'Frame number']
verticalLabels = ['x-position', 'y-position']
title = 'Avg. spline and fiducial'
splineCols = ['frame', 'xS', 'yS']
# Set the y-axis based on the average spline
minxy, maxxy = self.avgSpline['xS'].min(), self.avgSpline['xS'].max()
minyy, maxyy = self.avgSpline['yS'].min(), self.avgSpline['yS'].max()
minxy -= 50
maxxy += 50
minyy -= 50
maxyy += 50
ylims = [minxy, maxxy, minyy, maxyy]
if curveNumber is None:
# Plot all trajectories and splines
startIndex = 0
stopIndex = len(self.splines)
else:
# Plot only the input trajectory and spline
startIndex = curveNumber
stopIndex = curveNumber + 1
for fid in range(startIndex, stopIndex):
locs = self.regionLocs.xs(fid, level='region_id', drop_level=False)
# Filter out localizations that are outliers and find
# offsets
locs = locs.loc[locs[self._includeColName]]
offsets = self._computeOffsets(locs)
self._plotCurves(fid, coordCols, horizontalLabels,
verticalLabels, title, splineCols,
offsets, ylims)
def _removeOutliers(self):
"""Removes outlier localizations from fiducial tracks before fitting.
_removeOutliers() computes the center of mass of each cluster of
localizations belonging to a fiducial and then removes localizations
lying farther than self.maxRadius from this center.
"""
x = self.coordCols[0]
y = self.coordCols[1]
self.regionLocs[self._includeColName] = True
maxRadius = self.maxRadius
if not maxRadius:
return
# Change the region_id from an index to a normal column
self.regionLocs.reset_index(level='region_id', inplace=True)
groups = self.regionLocs.groupby('region_id')
temp = []
for _, group in groups:
# Make a copy to avoid the warning about modifying slices
group = group.copy()
# Subtract the center of mass and filter by distances
xc, yc = group.loc[:, [x, y]].mean()
dfc = pd.concat(
[group[x] - xc, group[y] - yc], axis=1)
distFilter = dfc[x]**2 + dfc[y]**2 > maxRadius**2
group.loc[distFilter, self._includeColName] = False
temp.append(group)
# Aggregate the filtered groups, reset the index, then recreate
# self.regionLocs with the filtered localizations
        temp = pd.concat(temp)
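# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of how DefaultDriftComputer might be driven,
# assuming `fidLocs` is a DataFrame of fiducial localizations carrying a
# 'region_id' index level plus 'x', 'y', and 'frame' columns; the frame range
# and parameter values below are assumptions.
#
#     dc = DefaultDriftComputer(coordCols=['x', 'y'], frameCol='frame',
#                               smoothingWindowSize=600,
#                               smoothingFilterSize=400, zeroFrame=1000)
#     avgSpline = dc.computeTrajectory(fidLocs, startFrame=0, stopFrame=20000)
#     dc.plotFiducials()    # inspect each fiducial against the average spline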
'''
Model training with the entire training data
'''
# Libraries
import pandas as pd
import numpy as np
import keras
import tensorflow as tf
from keras.models import Model
from tensorflow.keras.models import load_model
import keras.backend as K
from keras import optimizers
from keras.layers import Dense, Dropout, BatchNormalization, Conv1D, Flatten, Input, GaussianNoise, LeakyReLU, Add
from keras.utils import to_categorical, np_utils
from keras.regularizers import l2
from sklearn.model_selection import train_test_split, KFold, cross_val_score, StratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder, normalize
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import matplotlib.pyplot as plt
import pickle
from keras import regularizers
from keras import backend as K
from sklearn.utils import class_weight
# GPU
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
tf.keras.backend.clear_session()
config = ConfigProto()
config.gpu_options.allow_growth = True
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
LIMIT = 3 * 1024
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=LIMIT)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
# dataset import and preprocessing
ds = pd.read_csv("../dataset/dataset.csv")
X1 = ds.iloc[:,5:6] # (BF)
X1 = pd.DataFrame(X1)
X2 = ds.iloc[:,6:7] # rHpy
X = pd.concat([X1, X2], axis=1)
y = ds.iloc[:,7]
y_w = y
# Seed
seed = 1337
np.random.seed(1337)
# Features
# Secondary Structure Folds
infile = open('../features/SSF/feature/NF1_7.pickle','rb')
nf1_9 = pickle.load(infile)
infile.close()
# # Amino Acid Signatures in the Interaction Shells
infile = open('../features/AASIS/feature/NF2_8.pickle','rb')
nf2_8 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_7.pickle','rb')
nf2_7 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_6.pickle','rb')
nf2_6 = pickle.load(infile)
infile.close()
infile = open('../features/AASIS/feature/NF2_5.pickle','rb')
nf2_5 = pickle.load(infile)
infile.close()
# # Enzyme Class
infile = open('../features/EC/feature/NF3_le.pickle','rb')
nf3 = pickle.load(infile)
infile.close()
# # Motifs
infile = open('../features/Motifs/feature/NF4_13.pickle','rb')
nf4_13 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_11.pickle','rb')
nf4_11 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_9.pickle','rb')
nf4_9 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_7.pickle','rb')
nf4_7 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_5.pickle','rb')
nf4_5 = pickle.load(infile)
infile.close()
infile = open('../features/Motifs/feature/NF4_3.pickle','rb')
nf4_3 = pickle.load(infile)
infile.close()
# Feature Selection
nf1_9 = pd.DataFrame(nf1_9)
nf2_8 = pd.DataFrame(nf2_8)
nf2_7 = pd.DataFrame(nf2_7)
nf2_6 = pd.DataFrame(nf2_6)
nf2_5 = pd.DataFrame(nf2_5)
nf3 = pd.DataFrame(nf3)
nf4_3 = pd.DataFrame(nf4_3)
nf4_5 = pd.DataFrame(nf4_5)
nf4_7 = pd.DataFrame(nf4_7)
nf4_9 = pd.DataFrame(nf4_9)
nf4_11 = pd.DataFrame(nf4_11)
nf4_13 = pd.DataFrame(nf4_13)
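# Hedged sketch (not in the original script): one plausible next step is to
# concatenate the base predictors with the loaded feature blocks into a single
# matrix; the exact subset, order, and any further scaling are assumptions.
# X_all = pd.concat([X, nf1_9, nf2_8, nf2_7, nf2_6, nf2_5, nf3,
#                    nf4_3, nf4_5, nf4_7, nf4_9, nf4_11, nf4_13], axis=1)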
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 12:11:33 2020
@author: Andrew
"""
import numpy as np
import pandas as pd
import src.features as features
import json
import os
#### methods for reading in or creating parameter files in params/
def read_ignoring_comments(filepath):
"""read in a file and return a list of lines not starting with '#'"""
with open(filepath) as f:
contents = f.read()
contents = contents.split('\n')
# filter out any blank lines or lines that are comments
contents = [c for c in contents if c != '' and c[0] != '#']
return contents
def get_gestures(version=2, path = 'params/'):
"""fetches gestures, and dictionaries mapping between integers and gestures"""
gestures = read_ignoring_comments(f'{path}gesturesV{version}.txt')
# get gesture to id dictionary
g2idx = {g: i for i, g in enumerate(gestures)}
# get id to gesture dictionary
idx2g = {i: g for i, g in enumerate(gestures)}
return gestures, g2idx, idx2g
def get_VoI(path = 'params/'):
"""fetches variables of interest from params/VoI.txt"""
VoI = read_ignoring_comments(f'{path}VoI.txt')
return VoI
def get_VoI_drop(path = 'params/'):
"""fetches variables to drop at prediction time from params/VoI_drop.txt"""
VoI_drop = read_ignoring_comments(f'{path}VoI_drop.txt')
return VoI_drop
def get_derived_feature_dict(path = 'params/'):
"""fetches two lists, one each for one and two handed features to derive"""
feature_dict = {}
feature_dict['one_handed'] = read_ignoring_comments(f'{path}derived_features_one_handed.txt')
feature_dict['two_handed'] = read_ignoring_comments(f'{path}derived_features_two_handed.txt')
return feature_dict
def create_dicts(df):
"""generates dictionaries for mean and std of a pandas df's columns, saving them to params/"""
with open('params/means_dict.json', 'w') as f:
json.dump(df.mean().to_dict(), f)
with open('params/stds_dict.json', 'w') as f:
json.dump(df.std().to_dict(), f)
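# The resulting JSON files simply map each column name to a scalar, e.g.
# (illustrative only; the column names and values are assumptions):
#   means_dict.json: {"left_palmPosition_0": 12.3, "right_grabStrength": 0.41, ...}
#   stds_dict.json:  {"left_palmPosition_0": 55.7, "right_grabStrength": 0.18, ...}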
#### methods for the different steps in getting training examples from a CSV file
def CSV2VoI(raw_file='data/recordings/fist_test.csv', VoI_file='params/VoI.txt', target_fps=25):
"""Turns a csv file of raw leap data into a pandas df containing gesture + variables of interest
Attributes:
raw_file -- str, giving the path/name of the leap motion data
VoI_file -- str, giving the path/name of the txt file, with a variable of interest for each line
    target_fps -- int, output fps. Every nth frame is taken to achieve this; n is calculated using the average fps of the file
Note:
The VoI txt file shouldn't reference handedness for each of its chosen variables, or contain any
variables that won't work with 'left_' or 'right_' appended to the start of them.
Assumes that 'gesture' is a column name, indicating the gesture being performed.
The error thrown when VoI contains an invalid name does not specify which name is invalid. This is annoying!
"""
# get the raw leap data from a csv file
with open(raw_file, 'r') as f:
raw = pd.read_csv(f)
# get the variables of interest
VoI = get_VoI()
# get the average frame rate of the file
mean_fps = raw['currentFrameRate'].mean()
# get number of frames to skip
skip = round(mean_fps / target_fps)
if skip == 0:
        print('WARNING: Average file frame rate is less than half the target frame rate. Taking every frame.')
skip = 1
# replace raw df with skipped frame version
raw = raw.iloc[::skip,:]
print(f'mean fps: {mean_fps:.2f}')
print(f'target fps: {target_fps}')
print(f'taking every {skip} frames')
### get df with VoI only
# make list of variables to extract
VoI_list = ['gesture']
# check there is data for the left hand, before adding left hand variables to VoI_list
left, right = False, False
if len(raw.filter(regex='left').columns) != 0:
left = True
VoI_list += ['left_' + v for v in VoI]
fraction_active = 1 - sum(raw['left_' + VoI[0]].isna()) / len(raw)
print(f'{fraction_active*100:.2f}% of rows contain valid LH data')
# likewise, for right
if len(raw.filter(regex='right').columns) != 0:
right = True
VoI_list += ['right_' + v for v in VoI]
fraction_active = 1 - sum(raw['right_' + VoI[0]].isna()) / len(raw)
print(f'{fraction_active*100:.2f}% of rows contain valid RH data')
    df = raw[VoI_list].copy()
print('Found left hand data: ', left)
print('Found right hand data: ', right)
df.reset_index(inplace=True)
return df
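# Hedged usage example (the path and fps are just this function's defaults):
#   df = CSV2VoI(raw_file='data/recordings/fist_test.csv', target_fps=25)
#   df.filter(regex='gesture|right_').head()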
def df2X_y(df, g2idx = {'no_gesture': 0, 'so_so': 1, 'open_close': 2, 'maybe': 3}, hands=['right', 'left'],
derive_features=True, standardize=True, dicts_gen=False, mirror=False):
"""Extracts X and y from pandas data frame, drops nan rows, and normalizes variables
Arguments:
df -- a dataframe of leap motion capture data
g2idx -- dict mapping gesture names to integers
hands -- list of hands to keep columns for
derive_features -- bool, indicates whether or not to derive more features
standardize -- bool, indicates whether or not to standardize and center variables
    dicts_gen -- if true, new standard deviation and mean dictionaries are generated from the df and saved to params/;
        needed if new features have been added to the model.
mirror -- if true, flips data so that left hand becomes right hand and vice versa
Returns:
df.values -- np array of shape (time steps, features), predictors for every time step
y -- np array of shape (time steps), with an int label for every time step
Note:
Purging na rows is a bit clumsy, it results in sudden time jumps in the input.
Ideally a single training example shouldn't contain such a jump. In any case, this is likely rare.
"""
# drop columns for other hand, drop na rows
len_with_na = len(df)
# filter to gesture + variables for hands of interest
df = df.filter(regex='|'.join(hands + ['gesture']))
# filter out any rows that have a gesture not in g2idx
allowable_gestures = [g in g2idx.keys() for g in df['gesture']]
if False in allowable_gestures:
n_rows = len(allowable_gestures)
invalid_rows = n_rows - sum(allowable_gestures)
print(f'Warning: {invalid_rows} of {n_rows} rows contain gestures not in g2idx')
df = df[allowable_gestures]
if len(hands) == 1:
# if we are only interested in one hand, then at this point the df will only contain cols for that hand
# if the other hand was active while the hand of interest wasn't, this will leave NA rows
df.dropna(inplace=True)
else:
# if both hands are required, then we replace nans with the last observed valid value
df.fillna(method='ffill', inplace=True)
# the first rows might contain nans - remove these, then reset index
df.dropna(inplace=True)
df.reset_index(inplace=True, drop=True)
# make sure that data for both hands is present in the dataframe
assert df.filter(regex='left').shape[1] > 0 and df.filter(regex='right').shape[1] > 0, 'Dataframe contains columns for only one hand, but data for both is requested'
print(f'dealt with {len_with_na - len(df)} of {len_with_na} rows with nans')
# warn if mirror=True but only one hand is needed for data, and set mirror to False
# in future, this could be restructured so that mirroring occurs before data for other hand is dropped, so that mirroring can be used one-handed
if mirror and len(hands) == 1:
print('WARNING: length of hands is one, but mirror=True will result in one handed data being mirrored to opposite hand. mirror set to False.')
mirror = False
if mirror:
df = mirror_data(df)
print('Data successfully mirrored')
# at this point, we may wish to derive some more features, and drop some of the original VoI
if derive_features:
derived_feature_dict = get_derived_feature_dict()
df = pd.concat([df, pd.DataFrame.from_records(df.apply(features.get_derived_features,args=(derived_feature_dict,),axis=1))], axis=1)
for hand in hands:
for VoI in get_VoI_drop():
df.drop(hand + '_' + VoI, axis=1, inplace=True)
# extract the gesture label after dealing with nans
y = [g2idx[i] for i in df['gesture']]
df = df.drop(columns=['gesture'])
# # use create_dicts here if new derived variables have been created
if dicts_gen:
create_dicts(df)
# perform mean normalization and scaling for unit variance
if standardize:
# use the dictionaries of means and stds for each variable
with open('params/means_dict.json', 'r') as f:
means_dict = json.load(f)
for col in df.columns:
df[col] = df[col] - means_dict[col]
with open('params/stds_dict.json', 'r') as f:
stds_dict = json.load(f)
for col in df.columns:
df[col] = df[col] / stds_dict[col]
# get range for each variable, to check normalization:
# print(df.min(), df.max())
# make sure that columns are in alphabetical order, so that model training and deployment accord with one another
df = df.reindex(sorted(df.columns), axis=1)
return df.values, np.array(y)
def X_y2examples(X,y=[],n_frames=30, stride=None):
"""splits a contiguous list of frames and labels up into single gesture examples of length n_frames
Arguments:
X -- features to be split up
y -- labels for each frame
n_frames -- int, length of each training example
stride -- int, determines how far to move along the sliding window that takes training examples, defaults to n_frames // 2
Returns:
X_final -- np array of shape (examples, frames, features)
y_final -- np array of shape (examples), using integers to indicate gestures. I.e. not one hot.
Note:
A sliding window is used to take training examples, with stride equal to half of frame size
Any trailing frames not long enough for a complete example will be dropped
"""
# # simple test case
# X = [[0,1],[0,2],[0,3],[0,4],[0,5],[0,6],[0,7],[0,8],[1,1],[1,2],[1,3],[1,4],[1,5],[3,1],[3,2],[3,3],[3,4],[3,5],[3,6],[3,7],[4,1],[4,2]]
# y = [0,0,0,0,0,0,0,0,1,1,1,1,1,3,3,3,3,3,3,3,4,4]
# if there are no y labels, then just return X split up into n_frames length examples
if len(y) == 0:
return np.array([X[i:i+n_frames] for i in range(0, len(X) - len(X) % n_frames, n_frames)])
if stride == None:
stride = n_frames // 2
#### part 1: get the start and end indices of each gesture
# each sublist contains two indices, for start and end
Xsplit = [[0]]
# ysplit contains a label for each sublist in Xsplit
ysplit = [y[0]]
for i, g in enumerate(y):
        # check if this frame is a different gesture
if g != ysplit[-1]:
# note down i - 1 as last index of previous gesture
Xsplit[-1].append(i-1)
# note down i as first index of current gesture
Xsplit.append([i])
ysplit.append(g)
Xsplit[-1].append(len(X)-1)
#### part 2: split up into examples, using the generated indices
X_final = []
y_final = []
for i, g in enumerate(ysplit):
# iterate over what will be the end index of each training example
# we add 2 to the upper bound because it is non inclusive (+1), but then neither is the stop index when slicing to get the example (+1 again)
for j in range(Xsplit[i][0] + n_frames, Xsplit[i][1] + 2, stride):
X_final.append(X[j-n_frames:j])
y_final.append(g)
return np.array(X_final), np.array(y_final)
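# Worked illustration (hedged; mirrors the commented test case above): with
# n_frames=3 and the default stride of n_frames // 2 = 1, labels
# y = [0, 0, 0, 0, 0, 1, 1, 1] yield windows X[0:3], X[1:4], X[2:5] for
# gesture 0 and X[5:8] for gesture 1; windows never straddle a gesture
# boundary, and trailing frames shorter than n_frames are dropped.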
def synced_shuffle(x, y):
'''shuffles two numpy arrays in sync'''
state = np.random.get_state()
np.random.shuffle(x)
np.random.set_state(state)
np.random.shuffle(y)
def mirror_data(df):
"""takes a data frame of gesture data and generates its mirror image: RH <-> LH"""
#### warning: this function MIGHT break if different VoI are used
# swap around left and right variables
df_flipped = df.copy()
df_flipped.columns = [col.replace('left', 'lft') for col in df_flipped.columns]
df_flipped.columns = [col.replace('right', 'left') for col in df_flipped.columns]
df_flipped.columns = [col.replace('lft', 'right') for col in df_flipped.columns]
df_flipped = df_flipped.apply(lambda x: -x if x.name[-1] == '0' else x, axis=0)
return df_flipped
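# Illustration (the column name is an assumption): a column such as
# 'left_palmPosition_0' is renamed to 'right_palmPosition_0' via the
# left -> lft -> right shuffle above, and because its name ends in '0'
# (taken here to be the left/right axis) its values are negated.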
#### methods for combining the above together, to go straight from a CSVs to training examples
def CSV2examples(raw_file='data/recordings/test1.csv', target_fps=30,
g2idx={'no_gesture': 0, 'so_so': 1}, hands=['left', 'right'], n_frames=25, standardize=True, dicts_gen=False, mirror=True, derive_features=True):
"""all of the above: gets VoI, and using these, splits a CSV to X and y"""
df = CSV2VoI(raw_file=raw_file, VoI_file='params/VoI.txt', target_fps=target_fps)
X_contiguous, y_contiguous = df2X_y(df, g2idx, hands=hands, standardize=standardize, dicts_gen=dicts_gen, mirror=False, derive_features=derive_features)
X, y = X_y2examples(X_contiguous, y=y_contiguous, n_frames=n_frames)
if mirror:
X_contiguous, y_contiguous = df2X_y(df, g2idx, hands=hands, standardize=standardize, dicts_gen=dicts_gen, mirror=True, derive_features=derive_features)
X2, y2 = X_y2examples(X_contiguous, y=y_contiguous, n_frames=n_frames)
X = np.concatenate([X,X2])
y = np.concatenate([y, y2])
synced_shuffle(X, y)
return X, y
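# Hedged usage sketch (arguments shown are just the defaults above):
#   X, y = CSV2examples(raw_file='data/recordings/test1.csv', target_fps=30,
#                       g2idx={'no_gesture': 0, 'so_so': 1},
#                       hands=['left', 'right'], n_frames=25)
#   # X has shape (n_examples, 25, n_features); y has shape (n_examples,)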
def folder2examples(folder='data/loops/', target_fps=30,
g2idx={'no_gesture': 0, 'so_so': 1}, hands=['left', 'right'], n_frames=25, standardize=True,
dicts_gen=False, mirror=True, derive_features=True):
'''all of the above: gets VoI, splits a folder of CSVs to X and y'''
# create empty data frame
    df = pd.DataFrame()
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:.conda-bandit_nhgf]
# language: python
# name: conda-env-.conda-bandit_nhgf-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import numpy as np
import pandas as pd
from collections import OrderedDict
from pyPRMS.ParameterFile import ParameterFile
# %%
step = 'SOM' # one of AET, RUN, RCH, SCA, SOM, ALL
hru_id = '66119'
workdir = f'/Users/pnorton/Projects/National_Hydrology_Model/calibrations/NHMv11/byHRU_sample/HRU{hru_id}'
calib_file = f'{workdir}/parameter_info'
param_file = f'{workdir}/params_byHRU'
ofs_file = f'{workdir}/RESULTS/{step}_OFS_HRU{hru_id}'
mean_param_file = f'{workdir}/RESULTS/{step}_PARAMS_HRU{hru_id}'
cal_sca = False
# %%
# 1 RUN 0.34418
# 2 AET 0.27683
# 3 SCA 0.16787
# 4 SOM 0.13448
# 5 RCH 0.07663
# 6 ALL 1.00000
# %%
calib_df = pd.read_csv(calib_file, sep='\s+', index_col=['Parameter'])
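# %%
# Added cell (hedged; not part of the original notebook): quick inspection of
# the calibration table loaded above.
calib_df.head()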
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import warnings
import seaborn as sns
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows', 100, 'display.max_columns', 10000, 'display.max_colwidth', 10000, 'display.width', 10000)