| prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
|---|---|---|
"""
Import necessary libraries
"""
from itertools import chain
import sqlalchemy as db
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import json
from time import sleep
# Some components adapted from / referenced from: https://github.com/erilu/web-scraping-NBA-statistics
def build_team() -> dict[str, str]:
"""
This function access espn's nba page and extract names of each roster. For each roster name, URL of the team page
is created. :return:
"""
f = urlopen('https://www.espn.com/nba/teams')
teams_source = f.read().decode('utf-8')
teams = dict(re.findall("www\.espn\.com/nba/team/_/name/(\w+)/(.+?)\",", teams_source))
roster_urls = []
for key in teams.keys():
roster_urls.append('https://www.espn.com/nba/team/roster/_/name/' + key + '/' + teams[key])
teams[key] = str(teams[key])
return dict(zip(teams.values(), roster_urls))
def get_info_players(roster_url) -> dict:
"""
Function retrieves info such as age, height salary etc for each player within a roster.
:param roster_url:
:return:
"""
f = urlopen(roster_url)
roster_source = f.read().decode('utf-8')
sleep(0.6)
player_regex = '\{\"name\"\:\"(\w+\s\w+)\",\"href\"\:\"https?\://www\.espn\.com/nba/player/.*?\",(.*?)\}'
player_info = re.findall(player_regex, roster_source)
player_dict = dict()
for player in player_info:
player_dict[player[0]] = json.loads("{" + player[1] + "}")
return player_dict
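# Illustrative usage sketch (not part of the original script; variable names below are
# hypothetical): build_team() and get_info_players() are meant to be chained, e.g.
#   team_urls = build_team()   # {'boston-celtics': 'https://www.espn.com/nba/team/roster/_/name/bos/boston-celtics', ...}
#   all_players = {team: get_info_players(url) for team, url in team_urls.items()}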
def career_stats(all_players_df) -> pd.DataFrame:
"""
Steps involved in this function: 1. Data Frame created for storing stats for each player 2. For each player,
espn webpage is parsed and career stats average is retrieved 3.Some of the stats contain non-numerical symbols (
example: - 3PT means 3-Point Field Goals Made-Attempted Per Game, so 5.3-12.7 is split as 3PTM - 5.3 and
3PTA - 12.7 similarly is done for FG and FT as well.
:return:
"""
career_stats_df = pd.DataFrame(
columns=["GP", "GS", "MIN", "FGM", "FGA", "FG%", "3PTM", "3PTA", "3P%", "FTM", "FTA", "FT%", "OR", "DR", "REB",
"AST", "BLK", "STL", "PF", "TO", "PTS"])
for player_index in all_players_df.index:
url = "https://www.espn.com/nba/player/stats/_/id/" + str(all_players_df.loc[player_index]['id'])
f = urlopen(url)
soup = BeautifulSoup(f, 'html.parser')
sleep(0.5)
try:
content = soup.find_all('div', class_='ResponsiveTable ResponsiveTable--fixed-left pt4')[0]
year = []
scores = []
tr = content.find_all('tr', class_='Table__TR Table__TR--sm Table__even')
for i, point in enumerate(tr):
td = point.find_all('td')
i = 0
scr = []
for element in td:
if len(td) <= 2:
year.append(td[i].text)
elif len(td) >= 2:
scr.append(td[i].text)
i += 1
scores.append(scr)
career_info = list(chain.from_iterable([i.split("-") for i in scores[-2]]))
career_info = list(map(float, career_info))
career_stats_df = career_stats_df.append(
pd.Series(career_info, index=career_stats_df.columns, name=player_index))
# -------------------------------------------------- ML 02/10/2019 ----------------------------------------------------#
#
# This is the class for poisson process
#
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
import pandas as pd
import math
from handles.data_hand import get_slotted_data
from sklearn.linear_model import LinearRegression
from scipy.stats import kstest
import statsmodels.api as sm
import statsmodels.formula.api as smf
from modeling.stat.models import fit_neg_binom
from scipy.stats import expon,gamma,nbinom
import random
random.seed( 30 )
class poisson_process:
def __init__(self,events,x,slotmin=60,sesonality=24.00,x_meta=None,combine=None,variablity_lambda=True):
# x holds the numeric features that lambda depends on.
# x_meta holds the categorical features that lambda depends on.
# sesonality is when to loop the time series back, e.g. every 24 hours.
# x can be any factor levels, with _ between each category; however, each category
# should be defined by a numeric indicator.
self.x_names = np.array( x.columns )
self.ts = np.array(events)
self.x = np.array(x)
self.x_meta=x_meta
self.slotmin = slotmin
self.sesonality = float( sesonality )
self.processed_data = self.get_combined_ts_data(combine=combine)
self.def_scale_multiplier()
self._variablity_lambda = variablity_lambda
def combine_timeslots(self,x,combine):
p = x.copy()
p[np.in1d(x, combine)] = combine[0]
return p
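# Illustrative effect of combine_timeslots (assumed values): every slot listed in
# `combine` is pooled into the first element of `combine`, e.g.
#   x = np.array([21, 22, 23, 0, 1]); combine = np.array([22, 23, 0])
#   self.combine_timeslots(x, combine)  ->  array([21, 22, 22, 22, 1])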
def poles_fun(self,d):
return pd.DataFrame(d).apply(lambda x: 1/(x**3))
def def_scale_multiplier(self):
# this is based on empirical data
average_mat = pd.DataFrame({'2014':[0.237053898,0.23033784,0.22646637,0.224855127,0.22145071,0.22017719,0.219680942],
'2015':[0.190591233,0.185363899,0.183113651,0.180825924,0.179276851,0.179478113,0.17919847]}).T
average_mat.columns = [1000,1100,1200,1300,1400,1500,1600]
average_mat=average_mat.reset_index()
average_mat=average_mat.melt(id_vars=["index"],var_name="Poles",value_name="Value")
cols = ['year','poles','scale']
average_mat.columns = cols
average_mat[cols] = average_mat[cols].apply(pd.to_numeric, errors='coerce')
average_mat['poles']=self.poles_fun(average_mat['poles'])
regressor = LinearRegression()
regressor.fit(average_mat[['year','poles']], average_mat['scale'])
self.scale_multiplier_predictor = regressor
self.reset_scale_multiplier()
def reset_scale_multiplier(self):
self._scale_multiplier = 1
def avg_scale_pred(self,year,poles):
return self.scale_multiplier_predictor.predict(np.array([year,
np.array(self.poles_fun([poles]))]).reshape(1, -1))
def get_processed_data(self):
diff_col_name = 'Aarrival_diff'
delta_t = np.diff(self.ts, n=1).reshape(-1, 1)
fin_d = pd.DataFrame(np.concatenate((delta_t, self.x[:-1, :]), axis=1))
fin_d.columns = np.concatenate(
(np.array(diff_col_name).reshape(-1, 1), np.array(self.x_names).reshape(-1, 1)), axis=0).flatten()
fin_d[diff_col_name] = pd.to_numeric(fin_d[diff_col_name])
# split the values in the factor that was provided to us
split = fin_d[self.x_names[0]].str.split("_", -1)
n = []
for i in range(0, len(split[0])):
fin_d['f' + str(i)] = split.str.get(i)#.astype(float) # update this if code breaks
n.append('f' + str(i))
n.append(self.x_names[1])
self.all_names = n
fin_d = fin_d.sort_values(by=n)
return fin_d
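# Illustrative note (assumed factor encoding): if the first feature column holds factor
# strings such as '2_7_1', get_processed_data expands them into separate columns
# f0='2', f1='7', f2='1' and then sorts by [f0, f1, f2, <second feature name>].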
def get_combined_ts_data(self,combine):
# combine timeslots
# if the combine argument is given -- an array of time slots to combine -- we replace these with
# the first element of the combine array
# Start_time_internal holds the timeslots the data is modelled on
self.processed_data = self.get_processed_data()
self.combine = combine
if combine is None:
self.combined_slots = False
combined_timeslots = self.processed_data[self.x_names[1]]
else:
self.combined_slots = True
combined_timeslots = self.combine_timeslots(self.processed_data[self.x_names[1]], combine=combine)
self.processed_data['Start_time_internal'] = combined_timeslots
return self.processed_data
def get_slotted_data(self,data, slot_secs):
return get_slotted_data(data=data,slot_secs=slot_secs)
# ------------------------------------------- FITTING --------------------------------------------------------------
def daywise_training_data(self,d,combine,fac1,fac2,f1,days,orignal_start_slot):
# fac2 is our internal slots that are combined
# note that we calculate the average for the combined slots and then assign it to
# all the slots in that duration
if self.combined_slots:
x = fac2[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
scale_val = model_d_temp[(model_d_temp[:, 0] == combine[0])].flatten()[1]
add = [[i, scale_val, day_i] for i in combine[1:]]
model_d_temp = np.concatenate((model_d_temp, add))
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
else:
x = orignal_start_slot[(fac1 == f1)]
day = days[(fac1 == f1)]
model_d = []
for day_i in np.unique(day):
model_d_temp = []
for t_i in np.unique(x):
try:
model_d_temp.append([[t_i, expon.fit(pd.to_numeric(d[(x == t_i) & (day == day_i)]))[1], day_i]])
except:
continue
model_d_temp = np.vstack(model_d_temp)
model_d.append(model_d_temp)
model_d = np.vstack(model_d)
return model_d
def discreet_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days':data, 'arrivalslot':x,'indicator':1})
data_gamma = data_gamma.groupby(['days','arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot','count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
ks_t_D = pd.DataFrame()
ks_t_pval = pd.DataFrame()
t_t_pval = pd.DataFrame()
exp_loc = pd.DataFrame()
exp_scale = pd.DataFrame()
exp_shape = pd.DataFrame()
time_slot = pd.DataFrame()
pos_l = pd.DataFrame()
neg_bio_r = pd.DataFrame()
neg_bio_p = pd.DataFrame()
for f2 in np.unique(data_gamma['arrivalslot']):
d = pd.to_numeric( data_gamma[data_gamma['arrivalslot'] == f2]['count'] )
# poisson
lam = np.mean(d)
# gamma
alpha,loc, beta = gamma.fit(d,loc=0)
# ks test
D , kspval = kstest(d,'gamma', args=(alpha,loc,beta))
# ttest - one sided
# sample2 = gamma.rvs(a = alpha, loc=loc, scale=beta, size=d.shape[0])
val , pval = 0,0 #ttest_ind(d,sample2)
# neg_binom
r,p = fit_neg_binom(vec=np.array(d).flatten(),init=0.0000001)
# if we have combined data then add same model to all combined timeslots
if self.combined_slots and f2 == self.combine[0]:
for var in self.combine:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([var])
else:
pos_l = pos_l.append(pd.DataFrame([lam]))
exp_loc = exp_loc.append(pd.DataFrame([loc]))
exp_shape = exp_shape.append(pd.DataFrame([alpha]))
exp_scale = exp_scale.append(pd.DataFrame([beta]))
neg_bio_r = neg_bio_r.append(pd.DataFrame([r]))
neg_bio_p = neg_bio_p.append(pd.DataFrame([p]))
ks_t_D = ks_t_D.append(pd.DataFrame([D]))
ks_t_pval = ks_t_pval.append(pd.DataFrame([kspval]))
t_t_pval = t_t_pval.append(pd.DataFrame([pval / 2]))
# add timeslot
time_slot = time_slot.append([f2])
# this is the final fit
fit = pd.DataFrame()
fit[[self.x_names[1]]] = time_slot
fit['gamma_loc'] = np.array(exp_loc).flatten()
fit['gamma_scale'] = np.array(exp_scale).flatten()
fit['gamma_shape'] = np.array(exp_shape).flatten()
fit['KS_D'] = np.array(ks_t_D).flatten()
fit['KS_PVal'] = np.array(ks_t_pval).flatten()
fit['Ttest_PVal'] = np.array(t_t_pval).flatten()
fit['Poisson_lam'] = np.array(pos_l).flatten()
fit['Negbio_r'] = np.array(neg_bio_r).flatten()
fit['Negbio_p'] = np.array(neg_bio_p).flatten()
return fit,data_save,x_save
def neg_bio_reg_fit_model(self,data,x):
data_gamma = pd.DataFrame({'days': data, 'arrivalslot': x, 'indicator': 1})
data_gamma = data_gamma.groupby(['days', 'arrivalslot']).agg(['count']).reset_index()
data_gamma.columns = ['days', 'arrivalslot', 'count']
data_save = data_gamma['count']
x_save = data_gamma['arrivalslot']
nb_mu = pd.DataFrame()
nb_p = pd.DataFrame()
import random
import pandas as pd
from tqdm import tqdm
from shared.utils import make_dirs
from shared.utils import load_from_json
import sys
class Training_Data_Generator(object):
""" Class for generating ground-truth dataset used for feature learning
:param random_seed: parameter used for reproducibility
:param num_samples: total number of negative samples
:param neg_type: negative samples type (simple or hard)
:param query_type: query type (faq or user_query)
:param loss_type: the loss type used for BERT fine-tuning (softmax or triplet loss)
:param hard_filepath: the absolute path to the hard-negatives file
"""
def __init__(self, random_seed=5, num_samples=24, neg_type='simple', query_type='faq',
loss_type='triplet', hard_filepath=''):
self.random_seed = random_seed
self.num_samples = num_samples
self.hard_filepath = hard_filepath
self.neg_type = neg_type
self.query_type = query_type
self.loss_type = loss_type
self.pos_labels = []
self.neg_labels = []
self.num_pos_labels = 0
self.num_neg_labels = 0
self.id2qa = dict()
self.id2negids = dict()
self.df = pd.DataFrame()
self.seq_len_df = pd.DataFrame()
self.df_pos = pd.DataFrame()
self.df_neg = pd.DataFrame()
if self.query_type == 'faq':
self.hard_filepath = self.hard_filepath + "/hard_negatives_faq.json"
elif self.query_type == "user_query":
self.hard_filepath = self.hard_filepath + "/hard_negatives_user_query.json"
else:
raise ValueError('error, no query_type found for {}'.format(query_type))
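# Illustrative instantiation sketch (the filepath below is hypothetical):
#   gen = Training_Data_Generator(random_seed=5, num_samples=24, neg_type='hard',
#                                 query_type='faq', loss_type='triplet',
#                                 hard_filepath='/data/negatives')
# With query_type='faq', the constructor resolves the hard-negatives file to
# '/data/negatives/hard_negatives_faq.json'.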
def generate_pos_labels(self, query_answer_pairs):
""" Generate positive labels from qa pairs
:param query_answer_pairs: list of dicts
:return: list of positive labels
"""
qap_df = pd.DataFrame.from_records(query_answer_pairs)
import pandas as pd
import numpy as np
import re
import os
from pandas import json_normalize
import json
from alive_progress import alive_bar
class PrepareNSMCLogs:
def __init__(self, config):
self.raw_logs_dir = config.raw_logs_dir
self.prepared_logs_dir = config.prepared_logs_dir
self.filename = config.filename
@staticmethod
def starts_with_timestamp(line):
pattern = re.compile("^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})")
return bool(pattern.match(line))
def multiline_logs_processing(self, fpath):
dfs = []
with open(fpath) as f:
logs = f.readlines()
with alive_bar(len(logs), title="Parsing json to csv") as bar:
for log in logs:
json_log = json.loads(log)
df = json_normalize(json_log)
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import warnings
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import plotly
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from plotly.subplots import make_subplots
from torch import Tensor
from . import common_plots, common_statistics as common_stats
class BaseDiagnostics:
def __init__(self, samples: MonteCarloSamples):
self.samples = samples
self.statistics_dict = {}
self.plots_dict = {}
def _prepare_query_list(
self, query_list: Optional[List[RVIdentifier]] = None
) -> List[RVIdentifier]:
if query_list is None:
return list(self.samples.keys())
for query in query_list:
if not (query in self.samples):
raise ValueError(f"query {self._stringify_query(query)} does not exist")
return query_list
def summaryfn(self, func: Callable, display_names: List[str]) -> Callable:
"""
this function keeps a dictionary of all summary-related functions,
so it can handle both overridden functions and new ones that the user defines
:param func: method which is going to be executed when summary() is called.
:param display_names: the names that appear in the summary() output dataframe
:returns: user-visible function that can be called over a list of queries
"""
statistics_name = func.__name__
self.statistics_dict[statistics_name] = (func, display_names)
return self._standalone_summary_stat_function(statistics_name, func)
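# Illustrative registration sketch (my_stat is a hypothetical statistic function):
#   self.my_stat = self.summaryfn(my_stat, display_names=["my_stat"])
# after which summary(queries) includes the statistic and my_stat(queries) can also be
# called on its own, returning a pandas DataFrame.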
def _prepare_summary_stat_input(
self, query: RVIdentifier, chain: Optional[int] = None
):
query_samples = self.samples[query]
if query_samples.shape[0] != 1:
# squeeze out non-chain singleton dims
query_samples = query_samples.squeeze()
if chain is not None:
query_samples = query_samples[chain].unsqueeze(0)
return query_samples
def _create_table(
self, query: RVIdentifier, results: List[Tensor], func_list: List[str]
) -> pd.DataFrame:
"""
this function turns the output of each summary stat function into a dataframe
"""
out_pd = pd.DataFrame()
if len(results) > 0:
single_result_set = results[0]
if single_result_set is not None and len(single_result_set) > 0:
for flattened_index in range(single_result_set[0].numel()):
index = np.unravel_index(
flattened_index, tuple(single_result_set[0].size())
)
row_data = []
rowname = f"{self._stringify_query(query)}{list(index)}"
for result in results:
num_of_sets = result.size()[0]
for set_num in range(num_of_sets):
row_data.append(result[set_num][index].item())
cur = pd.DataFrame([row_data], columns=func_list, index=[rowname])
if out_pd.empty:
out_pd = cur
else:
out_pd = pd.concat([out_pd, cur])
return out_pd
def _stringify_query(self, query: RVIdentifier) -> str:
return f"{query.function.__name__}{query.arguments}"
def _execute_summary_stat_funcs(
self,
query: RVIdentifier,
func_dict: Dict[str, Tuple[Callable, str]],
chain: Optional[int] = None,
raise_warning: bool = False,
):
frames = pd.DataFrame()
query_results = []
func_list = []
queried_samples = self._prepare_summary_stat_input(query, chain)
for _k, (func, display_names) in func_dict.items():
result = func(queried_samples)
if result is None:
# in the case of r hat and other algorithms, they may return None
# if the samples do not have enough chains or have the wrong shape
if raise_warning:
warnings.warn(
f"{display_names} cannot be calculated for the provided samples"
)
continue
# the first dimension is equivalent to the size of the display_names
if len(display_names) <= 1:
result = result.unsqueeze(0)
query_results.append(result)
func_list.extend(display_names)
out_df = self._create_table(query, query_results, func_list)
if frames.empty:
frames = out_df
else:
frames = pd.concat([frames, out_df])
return frames
def summary(
self,
query_list: Optional[List[RVIdentifier]] = None,
chain: Optional[int] = None,
) -> pd.DataFrame:
"""
this function outputs a table summarizing results of registered functions
in self.statistics_dict for requested queries in query_list;
if chain is None, results correspond to the aggregated chains
"""
frames = pd.DataFrame()
query_list = self._prepare_query_list(query_list)
for query in query_list:
out_df = self._execute_summary_stat_funcs(
query, self.statistics_dict, chain
)
frames = pd.concat([frames, out_df])
frames.sort_index(inplace=True)
return frames
def _prepare_plots_input(
self, query: RVIdentifier, chain: Optional[int] = None
) -> Tensor:
"""
:param query: the query for which registered plot functions are called
:param chain: the chain that query samples are extracted from
:returns: tensor of query samples
"""
query_samples = self.samples[query]
if chain is not None:
return query_samples[chain].unsqueeze(0)
return query_samples
def plotfn(self, func: Callable, display_name: str) -> Callable:
"""
this function keeps a dictionary of all plot-related functions
so it can handle both overridden functions and new ones that the user defines
:param func: method which is going to be executed when plot() is called.
:param display_name: appears as part of the plot title for func
:returns: user-visible function that can be called over a list of queries
"""
self.plots_dict[func.__name__] = (func, display_name)
return self._standalone_plot_function(func.__name__, func)
def _execute_plot_funcs(
self,
query: RVIdentifier,
func_dict: Dict[str, Tuple[Callable, str]],
chain: Optional[int] = None,
display: Optional[bool] = False,
): # task T57168727 to add type
figs = []
queried_samples = self._prepare_plots_input(query, chain)
for _k, (func, display_name) in func_dict.items():
trace, labels = common_plots.plot_helper(queried_samples, func)
title = f"{self._stringify_query(query)} {display_name}"
fig = self._display_results(
trace,
[title + label for label in labels],
# pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
display,
)
figs.append(fig)
return figs
def plot(
self,
query_list: Optional[List[RVIdentifier]] = None,
display: Optional[bool] = False,
chain: Optional[int] = None,
): # task T57168727 to add type
"""
this function outputs plots related to registered functions in
self.plots_dict for requested queries in query_list
:param query_list: list of queries for which plot functions will be called
:param chain: the chain that query samples are extracted from
:returns: plotly object holding the results from registered plot functions
"""
figs = []
query_list = self._prepare_query_list(query_list)
for query in query_list:
fig = self._execute_plot_funcs(query, self.plots_dict, chain, display)
figs.extend(fig)
return figs
def _display_results(
self, traces, labels: List[str], display: bool
): # task T57168727 to add type
"""
:param traces: a list of plotly objects
:param labels: plot labels
:returns: a plotly subplot object
"""
fig = make_subplots(
rows=math.ceil(len(traces) / 2), cols=2, subplot_titles=tuple(labels)
)
r = 1
for trace in traces:
for data in trace:
fig.add_trace(data, row=math.ceil(r / 2), col=((r - 1) % 2) + 1)
r += 1
if display:
plotly.offline.iplot(fig)
return fig
def _standalone_plot_function(self, func_name: str, func: Callable) -> Callable:
"""
this function makes each registered plot function directly callable by the user
"""
@functools.wraps(func)
def _wrapper(
query_list: List[RVIdentifier],
chain: Optional[int] = None,
display: Optional[bool] = False,
):
figs = []
query_list = self._prepare_query_list(query_list)
for query in query_list:
fig = self._execute_plot_funcs(
query, {func_name: self.plots_dict[func_name]}, chain, display
)
figs.extend(fig)
return figs
return _wrapper
def _standalone_summary_stat_function(
self, func_name: str, func: Callable
) -> Callable:
"""
this function makes each registered summary-stat related function directly callable by the user
"""
@functools.wraps(func)
def _wrapper(query_list: List[RVIdentifier], chain: Optional[int] = None):
frames = pd.DataFrame()
query_list = self._prepare_query_list(query_list)
for query in query_list:
out_df = self._execute_summary_stat_funcs(
query, {func_name: self.statistics_dict[func_name]}, chain, True
)
frames = pd.concat([frames, out_df])
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__maintainer__ = __author__
import pytest
import numpy as np
import pandas as pd
from pylife.core import *
foo_bar_baz = pd.DataFrame({'foo': [1.0, 1.0], 'bar': [1.0, 1.0], 'baz': [1.0, 1.0]})
def test_keys_dataframe():
pd.testing.assert_index_equal(foo_bar_baz.test_accessor_none.keys(), pd.Index(['foo', 'bar', 'baz']))
def test_keys_series():
pd.testing.assert_index_equal(foo_bar_baz.iloc[0].test_accessor_none.keys(), pd.Index(['foo', 'bar', 'baz']))
import pandas
from msdss_models_api.models import Model
def create_init_method(can_input=True, can_output=True, can_update=True):
"""
Create model init method for scikit-learn models to be compatible with :class:`msdss_models_api:msdss_models_api.models.Model`.
See :class:`msdss_models_api:msdss_models_api.models.Model`.
Parameters
----------
can_input : bool
Whether the method ``.input`` is defined and available. This is useful for controlling route requests in an API.
can_output : bool
Whether the method ``.output`` is defined and available. This is useful for controlling route requests in an API.
can_update : bool
Whether the method ``.update`` is defined and available. This is useful for controlling route requests in an API.
Author
------
<NAME> <<EMAIL>>
Example
-------
.. jupyter-execute::
from msdss_models_sklearn.tools import *
from sklearn.linear_model import LinearRegression
input = create_input_method(LinearRegression)
"""
def init(self, can_input=can_input, can_output=can_output, can_update=can_update, *args, **kwargs):
Model.__init__(self, can_input=can_input, can_output=can_output, can_update=can_update, *args, **kwargs)
return init
def create_input_method(model):
"""
Create model input method for scikit-learn models to be compatible with :class:`msdss_models_api:msdss_models_api.models.Model`.
See :meth:`msdss_models_api:msdss_models_api.models.Model.input`.
Parameters
----------
model : class
Scikit-learn model class to create machine learning models.
Author
------
<NAME> <<EMAIL>>
Example
-------
.. jupyter-execute::
from msdss_models_sklearn.tools import *
from sklearn.linear_model import LinearRegression
input = create_input_method(LinearRegression)
"""
def input(self, data, x=None, y=None, _fit={}, *args, **kwargs):
# (create_input_method_vars) Set default vars
x = x if x else self.settings['x'] if 'x' in self.settings else x
y = y if y else self.settings['y'] if 'y' in self.settings else y
# (create_input_method_data) Format data for model instance input
data = pandas.DataFrame(data)
data_x = data[x] if x else data
data_y = data[y] if y else None
# (create_input_method_set) Train model instance
self.instance = model(*args, **kwargs).fit(data_x, data_y, **_fit)
return input
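# Illustrative assembly sketch (the class name below is hypothetical): the create_*
# helpers in this module are intended to be attached to an msdss Model subclass, e.g.
#   SklearnLinearRegression = type(
#       "SklearnLinearRegression",
#       (Model,),
#       {"__init__": create_init_method(),
#        "input": create_input_method(LinearRegression),
#        "output": create_output_method()},
#   )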
def create_output_method():
"""
Create model output method for scikit-learn models to be compatible with :class:`msdss_models_api:msdss_models_api.models.Model`.
See :meth:`msdss_models_api:msdss_models_api.models.Model.output`.
Author
------
<NAME> <<EMAIL>>
Example
-------
.. jupyter-execute::
from msdss_models_sklearn.tools import *
output = create_output_method()
"""
def output(self, data, x=None, y=None, *args, **kwargs):
# (create_output_method_vars) Set default vars
x = x if x else self.settings['x'] if 'x' in self.settings else x
y = y if y else self.settings['y'] if 'y' in self.settings else y
y = [y] if y and not isinstance(y, list) else y
# (create_output_method_data) Format data for model instance output
data = pandas.DataFrame(data)
# -*- coding: utf-8 -*-
"""Functions from market data"""
__author__ = "<NAME>"
__version__ = "1"
import pandas as pd
import numpy as np
from pyquanttrade.engine.utils import (
max_drawdown_ratio,
max_drawdown_value,
safe_div,
safe_min,
safe_sum,
safe_mean,
)
class DailyStats:
def __init__(self, data_sim, ticker, capital):
self.trade_list = None
self.tickers = [ticker]
self.tickers_capital = {}
self.tickers_capital[ticker] = capital
self.long_ticker = {}
self.short_ticker = {}
self.global_ticker = {}
(l, s, t) = self._initDataframe(data_sim.index, ticker, data_sim, capital)
self.long_ticker[ticker] = l
self.short_ticker[ticker] = s
self.global_ticker[ticker] = t
self.long_all = None
self.short_all = None
self.global_all = None
self.last_closed_profit = {}
self.last_closed_inversion = {}
self._last_trades_len = {}
self.last_closed_profit[ticker] = {"long": 0.0, "short": 0.0, "all": 0.0}
self.last_closed_inversion[ticker] = {"long": 0.0, "short": 0.0, "all": 0.0}
self._last_trades_len[ticker] = {"long": 0.0, "short": 0.0, "all": 0.0}
# Initialise the stats dataframes for a ticker's data.
def _initDataframe(self, index_data, ticker, data_sim, capital):
long_system_results = pd.DataFrame(index=index_data)
import osmnx as ox
import networkx as nx
import geopandas
import pandas as pd
from pylab import *
print('EXECUTING')
# Get graph
g = ox.graph_from_place(
'Brentwood - Darlington, Portland, Oregon, USA', network_type='all')
# transfer to GDF
g_gdf_nodes, g_gdf_edges = ox.graph_to_gdfs(g)
# transfer to data frame
g_dataframe_nodes = pd.DataFrame(g_gdf_nodes)
"""
NetSQL is a network query tool which helps to collect and filter data about your network.
Requires access to network devices, but also can process raw command output.
"""
from __future__ import print_function, unicode_literals
import json
import re
import csv
import getpass
import ipaddress
import argparse
import sys
import numpy
import textfsm
import os
import pandas as pd
from netmiko import ConnectHandler
from netmiko.ssh_exception import (
NetMikoTimeoutException,
NetMikoAuthenticationException,
SSHException,
)
from colorama import init, Fore, Style
DEVICE_TYPE = "cisco_ios"
REPORT_DIR = "reports\\"
RAW_OUTPUT_DIR = "raw_data\\"
class CustomParser(argparse.ArgumentParser):
"""
Overrides default CLI parser's print_help and error methods
"""
def print_help(self):
# Print default help from argparse.ArgumentParser class
super().print_help()
# print our help messages
print(
"\n Usage examples:"
+ '\n python netsql.py --query="select * from interfaces where Last_Input = never" --source 10.74.41.73 --user aupuser3 --screen-output --html-output'
+ '\n python netsql.py --query="select Interface,Name,Last_Input from interfaces where Last_Input = never" --source cleveland_st.txt --user azyuzin1 --screen-output --html-output'
+ '\n python netsql.py --query="select * from neighbours where Platform=Polycom" --source device_ip_addresses.txt --screen-output --user azyuzin1 --html-output'
+ "\n\n Query should be in the following format: "
+ '\n --query="select <fields to select or * > from <source> where <condition>"'
+ "\n <fields to select or * > and <source> are required, <condition> is optional "
+ "\n\n Query examples: "
+ "\n - List ports which never been used:"
+ '\n --query="select * from interfaces where Last_Input = never"'
+ "\n - Find switch port where a device located by its MAC"
+ '\n --query="select * from mac-addresses where MAC=3348"'
+ "\n - Get device details from L3 switch: IP, VLAN, port:"
'\n --query="select * from mac-addresses where MAC=b19f"'
+ "\n - Get device details from L2 switch:"
+ '\n --query="select * from addresses where MAC=b19f"'
+ "\n - Get neighbours details:"
+ '\n --query="select Host,Management_ip,Platform,Remote_Port,Local_port from neighbours"'
+ "\n - Get number of Polcycom devices in building:"
+ '\n --query="select * from neighbours where Platform=Polycom"'
)
print("\n The following data sources are allowed in queries: \n")
with open("command_definitions.json", "r") as f:
command_definitions = json.load(f)
with open("data_source_definitions.json", "r") as f:
source_definitions = json.load(f)
for source in source_definitions:
print(
"Data source: {:<15} Actual commands {}".format(
source["data_source_name"], source["commands"]
)
)
def error(self, message):
print("error: %s\n" % message)
print("Use --help or -h for help")
sys.exit(2)
# -------------------------------------------------------------------------------------------
def parse_args(args=sys.argv[1:]):
"""Parse arguments."""
parser = CustomParser()
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
optional = parser.add_argument_group("optional arguments")
# Required arguments
required.add_argument(
"-q", "--query", help="Query, see usage examples", type=str, required=True
)
required.add_argument(
"-s",
"--source",
help="Source of IP addresses to process. Can be a file, or a single IP address",
required=True,
)
required.add_argument(
"-u", "--user", help="Username to connect to network devices", required=True
)
# Optional arguments
optional.add_argument(
"--no-connect",
"-nc",
default=False,
action="store_true",
help="Run without connecting to network devices, uses the output previously collected. Impoves query processing speed",
)
optional.add_argument(
"--screen-output",
default=True,
required=False,
action="store_true",
help="Prints report to screen. CVS reports are always generated",
)
optional.add_argument(
"--screen-lines",
default=10,
type=int,
required=False,
help="Number of lines to output for each device",
)
optional.add_argument(
"--html-output",
"-html",
default=False,
action="store_true",
help="Prints report to HTML. CVS reports are always generated",
)
return parser.parse_args(args)
# -------------------------------------------------------------------------------------------
def command_analysis(text):
"""
:param text: SQL string, for example:
select first_name,last_name from students where id = 5
select * from students where first_name = "Mike" or "Andrew" and last_name = "Brown"
select last_name from students where math_score = "90" or "80" and last_name = "Smith" and year = 7 or 8
:return: Dictionary built from the input string, for example:
{'conditions': [{'cond_field': 'math_score',
'cond_value': ['"90"',
'"80"']},
{'cond_field': 'last_name',
'cond_value': '"Smith"'},
{'cond_field': 'year',
'cond_value': ['7',
'8']}],
'fields': ['*'],
'source': 'students'}
Written by <NAME>, McKinnon Secondary College, 07K. 2019.
"""
fields = []
source = ""
conditions = []
conditions_list = []
result = {}
command = text.split()
if command[0] == "select":
# field analysis
if "," in command[1]:
morefields = command[1].split(",")
for item in morefields:
fields.append(item)
else:
fields.append(command[1])
# checking whether 'from' exists
if command[2] == "from":
# source
source = command[3]
else:
print("Error: 'from' not found!")
try:
if command[4] == "where":
tempcond = " ".join(command[5:])
# split conditions by keyword 'and'
condition = tempcond.split("and")
# loop until everything has been sorted
for element in condition:
condition_dic = {}
# split every condition by keyword '='
val = element.split("=")
condition_dic["cond_field"] = val[0].strip()
conditions_list.append(val[0].strip())
if "or" in val[1]:
# if there is an 'or' in the request
tempvalue = ("").join(val[1])
values = tempvalue.split("or")
condition_dic["cond_value"] = []
for value in values:
if value != " ":
condition_dic["cond_value"].append(value.strip())
else:
condition_dic["cond_value"] = val[1].strip()
conditions.append(condition_dic)
except:
pass
else:
print("Invalid Format or Command!")
# if * is in list, return all fields anyway, so ignore all other selected fields
if "*" in fields:
fields[0] = "*"
del fields[1:]
else:
# add 'conditions' fields to the list of fields selected
fields.extend(conditions_list)
# remove duplicates
fields_no_duplicates = []
[fields_no_duplicates.append(item) for item in fields if item not in fields_no_duplicates]
fields = fields_no_duplicates
result["fields"] = fields[0:]
result["source"] = source
result["conditions"] = conditions[0:]
return result
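# Illustrative parse (matches the query examples shown in --help):
#   command_analysis('select * from interfaces where Last_Input = never')
#   -> {'fields': ['*'], 'source': 'interfaces',
#       'conditions': [{'cond_field': 'Last_Input', 'cond_value': 'never'}]}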
# -------------------------------------------------------------------------------------------
def run_command_and_write_to_txt(commands, a_device, no_connect, get_metadata_items):
"""
Executes IOS commands using Netmiko.
Writes raw output to a report file.
:param commands: list of commands
:param a_device: device IP
:param no_connect: whether to connect, if False the script exits without trying to connect
:return: False if any errors occurred, otherwise True
"""
# If Do Not Connect flag is set, do not connect to any devices, just return True
# The script uses the output .txt files previously collected
if no_connect:
return True
try:
remote_conn = ConnectHandler(**a_device)
except NetMikoAuthenticationException as error:
print("Authentication Exception - terminating program \n", str(error))
exit(1)
except NetMikoTimeoutException as error:
print(
" ===> WARNING : Timeout while connecting to: {}, error: {} Skipping.".format(
a_device["host"], str(error)
)
)
except SSHException as error:
print(
" ===> WARNING : SSH2 protocol negotiation or logic errors while connecting to: {}, error: {} Skipping.".format(
a_device["host"], str(error)
)
)
except Exception as error:
# raise ValueError(' ===> Skipping - Failed to execute due to %s', error)
print(
" ===> WARNING : Unhandled exception while connecting to: {}, error: {} Skipping.".format(
a_device["host"], str(error)
)
)
else:
# no exceptions happened - ssh connection established, OK to run commands
for command in commands:
file_name = get_file_path(a_device["host"], command, "raw_output") + ".txt"
print("Writing output to file: ", file_name)
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, "w") as f:
# execute command on a device and write to a file
f.write(remote_conn.send_command_expect(command))
if get_metadata_items:
# Get device metadata - hostname and location
print("Writing metadata to file: ", file_name)
metadata_item = ""
for item in get_metadata_items.keys():
file_name = get_file_path(a_device["host"], "_metadata", "raw_output") + ".txt"
os.makedirs(os.path.dirname(file_name), exist_ok=True)
metadata_item = metadata_item + item + ":" + remote_conn.send_command_expect(get_metadata_items[item]) + "\n"
with open(file_name, "w") as f:
# execute command on a device and write to a file
f.write(metadata_item)
# sucessful command execution - return True
return True
# failure during command execution - return False
print("-" * 80)
return False
# -------------------------------------------------------------------------------------------
def find_command(command, all_commands):
"""
Looks up a command in a list of dictionaries and returns the matching dictionary
:param command: Command to look up
:param all_commands: list of all commands
:return: dictionary
"""
for item in all_commands:
if item["command"] == command:
return item
# -------------------------------------------------------------------------------------------
def get_file_path(host, command, file_type):
"""
Builds file name from input arguments
:param host: Host ip address
:param command: Command to run
:param file_type: report or raw output
:return: full path with filename
"""
if file_type == "report":
file_name = REPORT_DIR + host + "\\" + command.replace(" ", "_")
else:
file_name = RAW_OUTPUT_DIR + host + "\\" + command.replace(" ", "_")
return file_name
# -------------------------------------------------------------------------------------------
def normalise_file(file_name):
"""
Replaces strings to normalise command output, for example, changes all interface names from GigabitEthernet to Gi
Add any other normalisation here
:param file_name: File Name to load and replace strings
:return: none
"""
# Issue - when regexes are stored as strings, they are treated as special characters
dict = {
"(GigabitEthernet)(\d{1})\/(\d{1})\/(\d{1,2})": "Gi\2/\3/\4",
"(Te)(\d{1})\/(\d{1})\/(\d{1,2})": "TenGi\2/\3/\4"
}
with open(file_name, "r+") as f:
content = f.read()
# needs to be fixed to read from dict
# for key in dict:
# print(key)
# print(dict[key])
# subs = "r"+ "\"" +(dict[key])+ "\""
# content_new = re.sub(
# key,
# #r dict[key],
# subs,
# text,
# flags=re.M,
# )
# text = content_new
content_new = re.sub(
"(TenGigabitEthernet)(\d{1})\/(\d{1})\/(\d{1,2})",
r"TenGi\2/\3/\4",
content,
flags=re.M,
)
content = content_new
content_new = re.sub(
"(GigabitEthernet)(\d{1})\/(\d{1})\/(\d{1,2})",
r"Gi\2/\3/\4",
content,
flags=re.M,
)
content = content_new
content_new = re.sub(
"(Te)(\d{1})\/(\d{1})\/(\d{1,2})",
r"TenGi\2/\3/\4",
content,
flags=re.M,
)
# rewriting file
f.seek(0)
f.truncate()
f.write(content_new)
# -------------------------------------------------------------------------------------------
def print_to_csv_file(headers, content, file_name):
"""
Prints text to CSV files, also normalises command output where necessary, such as GigabitEthernet -> Gi
:param headers: CSV headers
:param content: CSV text
:param file_name: output file name
:return: None
"""
try:
with open(file_name, "w", newline="") as out_csv:
csvwriter = csv.writer(out_csv, delimiter=",")
csvwriter.writerow(headers)
for item in content:
csvwriter.writerow(item)
# Replace strings to match different command output, for example, change all interface names from GigabitEthernet
# to Gi
normalise_file(file_name)
print("Writing CSV", file_name)
except Exception as e:
print("Error while opening file", e)
# -------------------------------------------------------------------------------------------
def convert_output_to_csv(commands, a_device):
"""
Parses raw text with TextFSM, converts it to CSV and writes the CSV file
:param commands: List of commands to execute
:param a_device: Dictionary - Netmiko device format
:return: none
"""
for command in commands:
# build file names - directory + host IP + command name + .txt
file_name = get_file_path(a_device["host"], command, "raw_output") + ".txt"
# Read the whole file
try:
with open(file_name, "r") as content_file:
raw_command_output = content_file.read()
except Exception as e:
# Could not open file, skip the remaining processing
print("Error while opening file", e)
return False
# Get headers and NTC templates for a given command - should be defined as global variables
try:
headers = find_command(command, command_definitions)["headers"]
template = find_command(command, command_definitions)["template"]
except:
print("template not yet defined for ", command, " - skipping")
continue
# Parse raw output with text FSM
text_fsm_template = textfsm.TextFSM(open(template))
parsed_command_output = text_fsm_template.ParseText(raw_command_output)
# print to CSV
print_to_csv_file(
headers, parsed_command_output, file_name.replace(".txt", ".csv")
)
return True
# -------------------------------------------------------------------------------------------
def process_csv_files(
join_dataframes, common_column, fields_to_select, filter, file1, file2, result_file
):
"""
Joins two dataframes.
Input parameters:
- common_column
- two csv files to join
Writes raw output to a report file
"""
# If "join_dataframes": true is source_definition.json
if join_dataframes:
pd1 = pd.read_csv(file1)
pd2 = pd.read_csv(file2)
if fields_to_select[0] == "*":
#result_pd = pd.merge(pd1, pd2, on=common_column)
result_pd = pd.merge(pd1, pd2, left_on=common_column[0], right_on=common_column[1])
else:
result_pd = pd.merge(pd1, pd2, left_on=common_column[0], right_on=common_column[1]).filter(fields_to_select)
#result_pd = pd.merge(pd1, pd2, left_on=common_column[0], right_on=common_column[1])
#result_pd = pd.merge(pd1, pd2, on=common_column).filter(fields_to_select)
else:
# If "join_dataframes": false is source_definition.json
pd1 = | pd.read_csv(file1) | pandas.read_csv |
import numpy as np
import pandas as pd
from bach import Series, DataFrame
from bach.operations.cut import CutOperation, QCutOperation
from sql_models.util import quote_identifier
from tests.functional.bach.test_data_and_utils import assert_equals_data
PD_TESTING_SETTINGS = {
'check_dtype': False,
'check_exact': False,
'atol': 1e-3,
}
def compare_boundaries(expected: pd.Series, result: Series) -> None:
for exp, res in zip(expected.to_numpy(), result.to_numpy()):
if not isinstance(exp, pd.Interval):
assert res is None or np.isnan(res)
continue
np.testing.assert_almost_equal(exp.left, float(res.left), decimal=2)
np.testing.assert_almost_equal(exp.right, float(res.right), decimal=2)
if exp.closed_left:
assert res.closed_left
if exp.closed_right:
assert res.closed_right
def test_cut_operation_pandas(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
expected = pd.cut(p_series, bins=10)
result = CutOperation(series=series, bins=10)()
compare_boundaries(expected, result)
expected_wo_right = pd.cut(p_series, bins=10, right=False)
result_wo_right = CutOperation(series, bins=10, right=False)()
compare_boundaries(expected_wo_right, result_wo_right)
def test_cut_operation_bach(engine) -> None:
p_series = pd.Series(range(100), name='a')
series = DataFrame.from_pandas(engine=engine, df=p_series.to_frame(), convert_objects=True).a
ranges = [
pd.Interval(0, 9.9, closed='both'),
pd.Interval(9.9, 19.8, closed='right'),
pd.Interval(19.8, 29.7, closed='right'),
pd.Interval(29.7, 39.6, closed='right'),
pd.Interval(39.6, 49.5, closed='right'),
pd.Interval(49.5, 59.4, closed='right'),
pd.Interval(59.4, 69.3, closed='right'),
pd.Interval(69.3, 79.2, closed='right'),
pd.Interval(79.2, 89.1, closed='right'),
#!/usr/bin/env python3
import os
import functools
import subprocess
import numpy as np
import pandas as pd
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
import deepmp.utils as ut
import deepmp.merge_h5s as mh5
names_all = ['chrom', 'pos', 'strand', 'pos_in_strand', 'readname',
'read_strand', 'kmer', 'signal_means', 'signal_stds', 'signal_median',
'signal_diff', 'qual', 'mis', 'ins', 'del', 'methyl_label']
names_seq = ['chrom', 'pos', 'strand', 'pos_in_strand', 'readname',
'read_strand', 'kmer', 'signal_means', 'signal_stds', 'signal_median',
'signal_diff', 'methyl_label']
names_err =['readname', 'pos', 'chrom', 'kmer', 'qual', 'mis', 'ins',
'del', 'methyl_label']
# ------------------------------------------------------------------------------
# PREPROCESS INCUDING TRAIN-TEST-VAL SPLITS
# ------------------------------------------------------------------------------
def get_training_test_val(df):
train, test = train_test_split(df, test_size=0.05, random_state=0)
train, val = train_test_split(train, test_size=test.shape[0], random_state=0)
return [(train, 'train'), (test, 'test'), (val, 'val')]
def get_training_test_val_chr(df):
test = df[df['chrom'] == 'chr1']
df_red = df[df['chrom'] != 'chr1']
train, val = train_test_split(df_red, test_size=0.05, random_state=0)
return [(train, 'train'), (test, 'test'), (val, 'val')]
def get_training_test_val_pos(df):
test = df[(df['pos_in_strand'] >= 1000000) & (df['pos_in_strand'] <= 2000000)]
df_red = pd.concat(
[df[df['pos_in_strand'] < 1000000], df[df['pos_in_strand'] > 2000000]]
)
train, val = train_test_split(df_red, test_size=0.05, random_state=0)
return [(train, 'train'), (test, 'test'), (val, 'val')]
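# Summary of the three split strategies dispatched by get_data() below (see split_type):
# 'read' -> random 5% test split across reads; 'chr' -> chromosome chr1 held out as the
# test set; otherwise -> positions 1,000,000-2,000,000 held out as the test set. In every
# case the validation set is drawn from the remaining training data.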
def save_tsv(df, output, file, mode='w'):
file_name = os.path.join(output, '{}.tsv'.format(file))
if mode == 'a':
df.to_csv(file_name, sep='\t', index=None, mode=mode, header=None)
else:
df.to_csv(file_name, sep='\t', index=None, mode=mode)
def get_positions_only(df, positions):
df = pd.merge(
df, positions, right_on=['chr', 'start', 'strand'],
left_on=['chrom', 'pos', 'strand']
)
label = np.zeros(len(df), dtype=int)
label[np.argwhere(df['status'].values == 'mod')] = 1
df = df[df.columns[:19]]
df['methyl_label'] = label
return df
def get_data(df, split_type):
if split_type == 'read':
return get_training_test_val(df)
elif split_type == 'chr':
return get_training_test_val_chr(df)
else:
return get_training_test_val_pos(df)
def save_data_in_h5(data, feature_type, tmps, file):
for el in data:
if el[0].shape[0] > 0:
if feature_type == 'combined':
ut.preprocess_combined(el[0], tmps, el[1], file)
elif feature_type == 'seq':
ut.preprocess_sequence(el[0], tmps, el[1], file)
else:
ut.preprocess_errors(el[0], tmps, el[1], file)
def split_sets_files(file, tmp_folder, counter, tsv_flag, output,
tmps, split_type, positions, feature_type):
df = pd.read_csv(os.path.join(tmp_folder, file), sep='\t', names=names_all)
if isinstance(positions, pd.DataFrame):
df = get_positions_only(df, positions)
data = get_data(df, split_type)
if tsv_flag:
for el in data:
if counter == 0:
mode = 'w'
else:
mode = 'a'
save_tsv(el[0], output, el[1], 'a')
save_data_in_h5(data, feature_type, tmps, file)
def split_preprocess(features, output, tsv_flag, cpus, split_type,
positions, feature_type):
tmp_folder = os.path.join(os.path.dirname(features), 'tmp_all/')
tmp_train = os.path.join(os.path.dirname(features), 'train/')
tmp_test = os.path.join(os.path.dirname(features), 'test/')
tmp_val = os.path.join(os.path.dirname(features), 'val/')
print('Splitting original file...')
os.mkdir(tmp_folder);
os.mkdir(tmp_train); os.mkdir(tmp_test); os.mkdir(tmp_val)
cmd = 'split -l {} {} {}'.format(20000, features, tmp_folder)
subprocess.call(cmd, shell=True)
if positions:
print('Getting position file...')
positions = pd.read_csv(positions, sep='\t')
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 20 12:06:23 2019
Elexon Data API
@author: <NAME>
"""
######################### Libraries ###########################
from datetime import date, timedelta, datetime
import requests
import os
from io import StringIO
import pandas as pd
import fnmatch
######################### Functions ###########################
class API:
def __init__(self, APIKEY):
self.APIKEY = APIKEY
def gen_url(self, report,start_date = '',end_date = '',\
period = '*', rtype = 0):
server = 'https://api.bmreports.com/BMRS/'
service = '&ServiceType=csv'
api = '/V1?APIKey=' + self.APIKEY
if rtype == 0:
fdate = "&FromDate="
tdate = "&ToDate="
try:
start_date = start_date.strftime('%Y-%m-%d')
end_date = end_date.strftime('%Y-%m-%d')
url = server+report+api+fdate+start_date+tdate+end_date+service
except:
print('Date range not provided. Default data for yesterday and today ({} to {})'\
.format(date.today() - timedelta(days=1),date.today()))
url = server+report+api+service
elif rtype == 1:
sdate = '&SettlementDate='
speriod = '&Period='
try:
start_date = start_date.strftime('%Y-%m-%d')
url = server+report+api+sdate+start_date+speriod+period+service
except:
print('Date range not provided. Default data for latest period({} p {})'\
.format(date.today(), datetime.now().hour*2\
+ int(datetime.now().minute/30)-1))
url = server+report+api+service
elif rtype == 2:
fdate = "&FromSettlementDate="
tdate = "&ToSettlementDate="
speriod = '&Period='
try:
start_date = start_date.strftime('%Y-%m-%d')
end_date = end_date.strftime('%Y-%m-%d')
url = server+report+api+fdate+start_date+tdate\
+end_date+speriod+period+service
except:
print('Date range not provided. Default data for yesterday and today ({} to {})'\
.format(date.today() - timedelta(days=1),date.today()))
url = server+report+api+service
elif rtype == 3:
year = "&Year=" + str(start_date)
try:
url = server+report+api+year+service
except:
print('Year not provided. Default data current year ({})'\
.format(date.today().year))
url = server+report+api+service
elif rtype == 4:
sdate = '&SettlementDate='
speriod = '&SettlementPeriod='
try:
start_date = start_date.strftime('%Y-%m-%d')
url = server+report+api+sdate+start_date+speriod+period+service
except:
print('Date range not provided. Default data for latest period({} p {})'\
.format(date.today(), datetime.now().hour*2\
+ int(datetime.now().minute/30)-1))
url = server+report+api+service
return url
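# Illustrative URL produced by gen_url (the API key value is hypothetical), e.g. for
# rtype=0 and report='FUELHH' with a date range:
#   https://api.bmreports.com/BMRS/FUELHH/V1?APIKey=<KEY>&FromDate=2021-01-01&ToDate=2021-01-02&ServiceType=csv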
def get_generation__by_fuel(self, start_date = '',end_date =''):
# BMRS Half Hourly Outturn Generation by Fuel Type
# 5.2.17 of API Guide pg. 84
rtype = 0
report = 'FUELHH'
names = ['Record Type', 'Settlement Date', 'Settlement Period',\
'CCGT', 'OIL', 'COAL', 'NUCLEAR', 'WIND', 'PS', 'NPSHYD',\
'OCGT', 'OTHER', 'INTFR', 'INTIRL', 'INTNED', 'INTEW',\
'BIOMASS', 'INTNEM']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
end_date = end_date, rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None,\
names = names, skiprows=1)
solar = self.get_solar(start_date = start_date, end_date = end_date)
# Format data
data = data.iloc[:-1]
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y%m%d') + data['Time']
data.drop(['Record Type', 'Time'], axis = 1, inplace = True)
data['SOLAR'] = solar
return data
def get_solar(self, start_date = '', end_date = ''):
sdate = start_date.strftime('%Y-%m-%dT%H:%M:%S')
edate = end_date.strftime('%Y-%m-%dT%H:%M:%S')
names = ['PES ID', 'DATETIME', 'SOLAR']
url = 'https://api0.solar.sheffield.ac.uk/pvlive/v2?start='\
+ sdate +'&end=' + edate + '&data_format=csv'
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), names = names, skiprows = 1)
data.index = pd.to_datetime(data['DATETIME'])
data.drop(['PES ID', 'DATETIME'], axis = 1, inplace = True)
data['SOLAR'] = data['SOLAR']
return data
def get_actual_demand(self, start_date = '',period ='*'):
# BMRS Actual Total Load per Bidding Zone
# 5.1.12 of API Guide pg. 24
rtype = 1
report = 'B0610'
names = ['TimeSeriesID','Settlement Date','Settlement Period',\
'Quantity','Secondary Quantity(MAW)','Document Type',\
'Business Type', 'Process Type', 'Object Aggregation',\
'Curve Type','Resolution','Unit Of Measure',\
'Document ID','Document RevNum','ActiveFlag']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
period = period, rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None,\
names = names, skiprows=1)
# Format data
data = data.iloc[4:-1]
data = data[['Settlement Date','Settlement Period','Quantity']]
data['Quantity'] = pd.to_numeric(data['Quantity'])
data['Settlement Period'] = pd.to_numeric(data['Settlement Period'])
data = data.sort_values('Settlement Period')
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y-%m-%d') + data['Time']
data.drop('Time',axis = 1, inplace = True)
data.rename({'Quantity': 'Actual'}, axis = 'columns', inplace = True)
return data
def get_dayahead_demand(self, start_date = '',period ='*'):
# BMRS Day-Ahead Total Load Forecast per Bidding Zone
# 5.1.13 of API Guide pg. 25
rtype = 1
report = 'B0620'
names = ['TimeSeriesID','Settlement Date','Settlement Period',\
'Quantity','Document Type', 'Business Type',\
'Process Type', 'Object Aggregation','Resolution',\
'Curve Type','Unit Of Measure', 'ActiveFlag',\
'Document ID','Document RevNum','Secondary Quantity(MAW)']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
period = period, rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None,\
names = names, skiprows=1)
# Format data
data = data.iloc[4:-1]
data = data[['Settlement Date','Settlement Period','Quantity']]
data['Settlement Period'] =\
pd.to_numeric(data['Settlement Period'])
data = data.sort_values('Settlement Period')
data['Quantity'] = pd.to_numeric(data['Quantity'])
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y-%m-%d') + data['Time']
data.drop('Time',axis = 1, inplace = True)
data.rename({'Quantity': 'Forecast'},\
axis = 'columns', inplace = True)
return data
def get_system_prices(self, start_date = '',end_date =''):
# BMRS Derived System Wide Data
# 5.2.51 of API Guide pg. 169
rtype = 2
report = 'DERSYSDATA'
names = ['Record Type', 'Settlement Date', 'Settlement Period',\
'SSP', 'SBP', 'BD', 'PDC', 'RSP', 'NIV', 'SPA', 'BPA',\
'RP', 'RPRV', 'OV', 'BV', 'TOV', 'TBV','ASV','ABV',\
'TASV','TABV']
#Generate URL
url = self.gen_url(report, start_date = start_date,\
end_date = end_date, period = '*', rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None, names = names)
# Format data
data = data.iloc[1:-1]
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y%m%d') + data['Time']
data.drop(['Record Type', 'Time'], axis = 1, inplace = True)
return data
def get_bo_stack(self, start_date = '',period =''):
# BMRS Detailed System Price Data
# 5.2.52 of API Guide pg. 177
rtype = 4
report = 'DETSYSPRICES'
names = ['Record Type', 'Settlement Date', 'Settlement Period',\
'INDEX', 'ID','Acc ID', 'BOP ID', 'CADL Flag', 'SO Flag',\
'STOR Flag', 'Reprice', 'RSP','BO Price', 'BO Volume',\
'DMAT Vol', 'Arb Vol', 'NIV Vol', 'PAR Vol',\
'Final Price', 'TLM', 'TLM Adj Vol', 'TLM Adj Price']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
period = str(period), rtype = rtype)
try:
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None, names = names)
#Format data
data = data.iloc[1:-1]
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y%m%d') + data['Time']
data = data[['Settlement Date', 'Settlement Period','BO Price',\
'BO Volume', 'Arb Vol', 'NIV Vol','Final Price']]
except:
names = ['Settlement Date', 'Settlement Period','BO Price',\
'BO Volume', 'Arb Vol', 'NIV Vol','Final Price']
data = pd.DataFrame(data = [], columns = names)
return data
def get_market_prices(self, start_date = '', end_date = '',\
period ='*'):
# BMRS Market Index Data
# 5.2.8 of API Guide pg. 69
rtype = 2
report = 'MID'
names = ['Record Type', 'Data Provider', 'Settlement Date',\
'Settlement Period','Price', 'Volume']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
end_date=end_date, period = str(period), rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None, names = names)
#Format data
data = data.iloc[1:-1]
data['Time'] = data['Settlement Period'].apply(lambda x:\
pd.Timedelta(str((x-1)*30)+' min'))
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y%m%d') + data['Time']
data = data[data['Data Provider'] == 'APXMIDP'] # Only keep APXMIDP
data.drop(['Record Type','Data Provider', 'Time'],\
axis = 1, inplace = True)
return data
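# Example usage (sketch): the class name and constructor below are assumptions
# based on the methods above, not confirmed by this file.
#   client = BMRSClient()  # hypothetical name for the class these methods live in
#   sys_prices = client.get_system_prices(start_date='2021-01-01', end_date='2021-01-07')
#   mid_prices = client.get_market_prices(start_date='2021-01-01', end_date='2021-01-07')
#   print(sys_prices[['SSP', 'SBP']].head())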
def get_temperature(self,start_date = '',end_date =''):
# BMRS Temperature Data (TEMP report)
rtype = 0
report = 'TEMP'
names = ['Record Type', 'Settlement Date', 'Temp', 'Temp_Norm',\
'Temp_Low', 'Temp_High']
#Generate URL
url = self.gen_url(report,start_date = start_date,\
end_date = end_date, rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None, names = names)
# Format data
data = data.iloc[1:-1].copy(deep = True)
data.index = pd.to_datetime(data['Settlement Date'],\
format = '%Y%m%d')
data.drop(['Record Type'], axis = 1, inplace = True)
return data
def get_installed_cap(self, year = ''):
# BMRS Installed Generation Capacity Aggregated
# 5.1.18 of API Guide pg. 31
rtype = 3
report = 'B1410'
names = ['Document Type', 'Business Type', 'Process Type', 'TimeSeriesID',\
'Quantity', 'Resolution', 'Year', 'Power System Resource Type',\
'ActiveFlag','DocumentID', 'Document RevNum']
#Generate URL
url = self.gen_url(report,start_date = year, rtype = rtype)
r = requests.get(url)
data = pd.read_csv(StringIO(r.text), header=None, names = names)
# Format data
data = data[['Year','Power System Resource Type','Quantity',]].iloc[5:-1].copy(deep = True)
data['Quantity'] = pd.to_numeric(data['Quantity'])
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from gators.model_building.train_test_split import TrainTestSplit
@pytest.fixture()
def data_ordered():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="ordered")
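# With strategy="ordered" and test_ratio=0.5, the first half of the rows (by
# position) is expected in the train split and the second half in the test split.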
X_train_expected = pd.DataFrame(
{
"A": {0: 0, 1: 5, 2: 10, 3: 15},
"B": {0: 1, 1: 6, 2: 11, 3: 16},
"C": {0: 2, 1: 7, 2: 12, 3: 17},
"D": {0: 3, 1: 8, 2: 13, 3: 18},
"E": {0: 4, 1: 9, 2: 14, 3: 19},
}
)
X_test_expected = pd.DataFrame(
{
"A": {4: 20, 5: 25, 6: 30, 7: 35},
"B": {4: 21, 5: 26, 6: 31, 7: 36},
"C": {4: 22, 5: 27, 6: 32, 7: 37},
"D": {4: 23, 5: 28, 6: 33, 7: 38},
"E": {4: 24, 5: 29, 6: 34, 7: 39},
}
)
y_train_expected = pd.Series({0: 0, 1: 1, 2: 2, 3: 0}, name=y_name)
y_test_expected = pd.Series({4: 1, 5: 2, 6: 0, 7: 1}, name=y_name)
return (
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
)
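# A test consuming this fixture would typically unpack it and compare against the
# expected frames; the transform signature below is an assumption, not confirmed here:
#   obj, X, y, X_train_expected, X_test_expected, y_train_expected, y_test_expected = data_ordered
#   X_train, X_test, y_train, y_test = obj.transform(X, y)
#   assert_frame_equal(X_train, X_train_expected)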
@pytest.fixture()
def data_random():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="random", random_state=0)
X_train_expected = pd.DataFrame(
{
"A": {0: 0, 3: 15, 4: 20, 5: 25},
"B": {0: 1, 3: 16, 4: 21, 5: 26},
"C": {0: 2, 3: 17, 4: 22, 5: 27},
"D": {0: 3, 3: 18, 4: 23, 5: 28},
"E": {0: 4, 3: 19, 4: 24, 5: 29},
}
)
X_test_expected = pd.DataFrame(
{
"A": {6: 30, 2: 10, 1: 5, 7: 35},
"B": {6: 31, 2: 11, 1: 6, 7: 36},
"C": {6: 32, 2: 12, 1: 7, 7: 37},
"D": {6: 33, 2: 13, 1: 8, 7: 38},
"E": {6: 34, 2: 14, 1: 9, 7: 39},
}
)
y_train_expected = pd.Series({0: 0, 3: 0, 4: 1, 5: 2}, name=y_name)
y_test_expected = pd.Series({6: 0, 2: 2, 1: 1, 7: 1}, name=y_name)
return (
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
)
@pytest.fixture()
def data_stratified():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
#!/usr/bin/env python
"""Given rows from a parse job script, generates tables/figures for certain
performance metrics. Each output from the parse job script is expected to have
one header row and one metrics row. The first 5 columns are expected to be the
task profile information.
"""
import argparse
from common import fileopen, parse_size
import csv
import os
import pandas as pd
class Metrics():
def __init__(self, header, rows, output, formats, template_path):
self.header = header
self.rows = rows
self.output = output
self.formats = formats
self.template_path = template_path
def show(self, name, column, **kwargs):
table = self._get_table(column)
prefix = "{}.{}".format(self.output, name)
for fmt in self.formats:
outfile = "{}.{}".format(prefix, fmt)
if fmt == 'txt':
table.to_csv(outfile, sep="\t", index=False)
elif fmt == 'pickle':
import pickle
with fileopen(outfile, 'wb') as out:
pickle.dump(table, out)
else:
fn = getattr(self, "{}_{}".format(name, fmt))
fn(table, column, outfile)
def mem_tex(self, table, column, outfile, name=None, caption=None):
texdat = (table.
drop('Program', 1).
rename(columns={
'Program2' : 'Program',
column : 'Memory'
}).
groupby(['Dataset', 'Threads', 'Program']).
agg({ 'Memory' : max }))
texdat = texdat.assign(MemoryMB=round(texdat['Memory'] / 1000000, 1))
from mako.template import Template
table_template = Template(filename=os.path.join(
self.template_path, "job_memory_table.tex"))
with fileopen(outfile, "wt") as o:
o.write(table_template.render(
name=name, caption=caption, table=texdat))
def mem_svg(self, table, column, outfile):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sb
sb.set(style="whitegrid")
svgdat = (table.
rename(columns={ column : 'Memory' }).
groupby(['Dataset', 'Threads', 'Program']).
agg({ 'Memory' : max }).
reset_index())
svgdat = svgdat.assign(MemoryMB=svgdat['Memory'] / 1000000)
threads = svgdat.Threads.unique()
if len(threads) == 1:
plot = sb.factorplot(
x='Program', y='MemoryMB', col="Dataset",
data=svgdat, kind="bar", ci=None, sharey=True)
else:
plot = sb.factorplot(
x='Threads', y='MemoryMB', col="Dataset", hue="Program",
data=svgdat, kind="bar", ci=None, sharey=True)
if len(threads) == 1:
plot = plot.set_titles('')
plot = plot.set_xlabels('Threads')
plot = plot.set_ylabels('Memory (MB)')
plot = plot.set_xticklabels(rotation=90)
plot.fig.subplots_adjust(wspace=0.35)
plot.savefig(outfile)
def _get_table(self, column, is_size=True):
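# Build a narrow table: the 5 task-profile columns (prog, prog2, threads,
# dataset, qcut) plus the requested metric column, optionally parsed from a
# size string into bytes.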
cols = list(range(5))
cols.append(self.header.index(column))
header = [self.header[c] for c in cols]
rows = [
[row[c] for c in cols]
for row in self.rows
]
if is_size:
for row in rows:
row[5] = parse_size(row[5])
table = pd.DataFrame.from_records(rows, columns=header)
table = table.rename(columns={
'prog' : 'Program',
'prog2' : 'Program2',
'threads' : 'Threads',
'dataset' : 'Dataset',
'qcut' : 'Quality',
})
table['Threads'] = pd.to_numeric(table['Threads'])
table['Dataset'] = pd.Categorical(table['Dataset'])
table['Program'] = pd.Categorical(table['Program'])
import pandas as pd
from pattern.en import conjugate
import global_variables as v
from generic_operations import print_to_file
def detect_activities(transformed_text_list, dictionary_list):
tagged_records = []
try: conjugate('hello', 'inf') # dirty fix to python 3.7 / pattern error
except: pass
for sentence in transformed_text_list:
if type(sentence) != float: # skip NaN rows (empty cells are read in as float NaN)
tokens = sentence.split(' ')
for idx, token in enumerate(tokens):
if v.symptom_state_tag_symbol not in token: # if it has not already been tagged as a symptom/state
conjugated_current_word = conjugate(token, 'inf')
if conjugated_current_word in dictionary_list:
tokens[idx] = token + v.maintenance_activity_tag_symbol
tagged_records.append(' '.join(tokens))
else:
tagged_records.append('')
print_to_file(v.maintenance_activity_output_path, tagged_records, v.output_headings)
def main():
print("Starting tagging: maintenance_activity")
preprocessed_data = pd.read_excel(v.symptom_state_output_path, sheet_name=v.input_file_sheet_name)
selected_data = pd.DataFrame(preprocessed_data, columns=v.output_headings)
transformed_text_list = list(selected_data[v.output_heading])
dict_data = pd.read_excel(v.maint_activity_dict_path, sheet_name=v.input_file_sheet_name)
selected_data = pd.DataFrame(dict_data, columns=v.dictionary_headings)
# import numpy
import numpy as np
# import the table classes from pandas
from pandas import Series, DataFrame
# generate an array of values
dados = np.arange(6)
linha = ['linha1', 'linha2', 'linha3', 'linha4', 'linha5', 'linha6']
coluna = ['coluna1', 'coluna2', 'coluna3']
# label (index) the array
serie = Series(dados, index=linha)
"""
conjoin_tables.py.
Bring together two tables:
- Reading times by subject by token
- Surprisal by token
This should be the final step before R analysis.
Ideally, this process would be included in the R analysis to lower the number
of steps needed to get data visualizations, but this Python script will fill
that role for now.
<NAME>
"""
import argparse
from functools import cache
import pandas as pd
parser = argparse.ArgumentParser()
DATADIR = '../data'
parser.add_argument('--id_file', default=f'{DATADIR}/ids.tsv')
parser.add_argument('--rnng_file',
default=f'{DATADIR}/naturalstories_rnng.output')
parser.add_argument('--rts_file', default=f'{DATADIR}/processed_RTs.tsv')
parser.add_argument('--lstm_file')
parser.add_argument('--save_file', default=f'{DATADIR}/final.csv')
def get_rts(rts_file) -> pd.DataFrame:
"""Load reading times from a file.
Returns a pandas Dataframe with the following columns:
* worker_id (str) - Unique identifier for the reader.
* work_time_total (int) - Total time the reader took.
* story (int) - Story index.
* story_pos (int) - Token index.
* rt (int) - Reading time in milliseconds.
"""
df = pd.read_csv(rts_file, sep='\t', header=0)
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly as pl
import re
import requests
from .DataFrameUtil import DataFrameUtil as dfUtil
class CreateDataFrame():
"""Classe de serviços para a criação de dataframes utilizados para a construção dos gráficos"""
def __init__(self):
self.dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
self.dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
self.dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
self.dfRegioes = pd.read_csv(url)
def DataFrameMensal():
pd.options.display.float_format = '{:.0f}'.format # no decimal places
# Building dataframes
# Collecting data from CSV files made available online.
url = 'https://covid19.who.int/WHO-COVID-19-global-table-data.csv'
dfRegioes = pd.read_csv(url)
dfTimeSeriesCases = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
dfTimeSeriesRecover = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
dfTimeSeriesDeath = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
# Collecting data via web scraping
html_source = requests.get("https://www.worldometers.info/coronavirus/").text
html_source = re.sub(r'<.*?>', lambda g: g.group(0).upper(), html_source)
table_MN2 = pd.read_html(html_source)
dfWorldMeters = table_MN2[0]
dfWorldMeters.columns = [column.replace(" ", "_").replace(",", "_").replace("-","").replace("__","_") for column in dfWorldMeters.columns]
# Renaming columns for standardization
dfTimeSeriesCases.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesRecover.rename(columns={'Country/Region':'Name'}, inplace=True)
dfTimeSeriesDeath.rename(columns={'Country/Region':'Name'}, inplace=True)
# Normalizing country names
dfTimeSeriesCases.loc[249,'Name'] = "United States of America"
dfTimeSeriesRecover.loc[249,'Name'] = "United States of America"
dfTimeSeriesDeath.loc[249,'Name'] = "United States of America"
dfWorldMeters.loc[8, 'Country_Other']= "United States of America"
dfWorldMeters.loc[13, 'Country_Other']= "United Kingdom"
dfRegioes.loc[6, 'Name'] ="United Kingdom"
# Filtering dataframes
dfRegioes.columns =[column.replace(" ", "_").replace("-","") for column in dfRegioes.columns]
dfRegioes.query('Name != "Global" and Name != "World" and Cases__cumulative_total > 0 and WHO_Region != "NaN"', inplace=True)
dfWorldMeters.query('Country_Other != "Total: " and Country_Other != "World" and ' +
' Country_Other != "North America" and Country_Other != "South America" and Country_Other != "Asia" and Country_Other != "Europe" ' +
'and Country_Other != "Africa" and Country_Other != "Oceania" and Country_Other != "Total:" and Country_Other != "NaN" and Population != "nan" and Population != "NaN"', inplace=True)
# Sorting dataframes
dfRegioes.sort_values(['Name'], inplace=True)
dfWorldMeters.sort_values(['Country_Other'], inplace=True)
# Creating new derived dataframes
selected_columns = dfRegioes[["Name", "WHO_Region"]]
dfRegioesNew = selected_columns.copy()
dfRegioesNew.sort_values(['Name'], inplace=True)
listMonth = ['Jan', 'Fev', 'Mar', 'Abr','Mai','Jun',
'Jul', 'Ago','Set','Out','Nov', 'Dez',
'Jan 21', 'Fev 21', 'Mar 21', 'Abr 21']
dfTimeSeriesCases.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesRecover.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
dfTimeSeriesDeath.drop(['Province/State', 'Lat','Long'], axis=1,inplace=True)
selected_columns = dfTimeSeriesCases[dfUtil.SelectColumnsMensal()]
dfTimeSeriesCases = selected_columns.copy()
selected_columns = dfTimeSeriesRecover[dfUtil.SelectColumnsMensal()]
dfTimeSeriesRecover = selected_columns.copy()
selected_columns = dfTimeSeriesDeath[dfUtil.SelectColumnsMensal()]
dfTimeSeriesDeath = selected_columns.copy()
selected_columns = dfWorldMeters[["Country_Other", "Population"]]
dfWorldMetersNew = selected_columns.copy()
dfWorldMetersNew.sort_values(['Country_Other'], inplace=True)
dfTimeSeriesCases = dfUtil.RenameColsMesAno(dfTimeSeriesCases)
dfTimeSeriesRecover = dfUtil.RenameColsMesAno(dfTimeSeriesRecover)
dfTimeSeriesDeath = dfUtil.RenameColsMesAno(dfTimeSeriesDeath)
# Renaming columns, standardizing for the final dataframe merge
dfRegioesNew.rename(columns={'WHO_Region':'Regiao'}, inplace=True)
dfWorldMetersNew.rename(columns={'Country_Other': 'Name'}, inplace=True)
dfWorldMetersNew.rename(columns={'Population': 'Populacao'}, inplace=True)
dfAux = dfTimeSeriesCases
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesCases)
dfTimeSeriesCases = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesRecover
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesRecover)
dfTimeSeriesRecover = dfAux.rename(columns=mapping)
dfAux = dfTimeSeriesDeath
mapping = dfUtil.CreateMappingMensal(dfTimeSeriesDeath)
dfTimeSeriesDeath = dfAux.rename(columns=mapping)
# Summing results across the dataframe rows
dfTimeSeriesCasesSomado = dfUtil.SumRows(dfTimeSeriesCases)
dfTimeSeriesRecoverSomado = dfUtil.SumRows(dfTimeSeriesRecover)
dfTimeSeriesDeathSomado = dfUtil.SumRows(dfTimeSeriesDeath)
# Resetting the dataframe indexes
dfRegioesNew.reset_index(drop=True)
dfWorldMetersNew.reset_index(drop=True)
dfTimeSeriesCasesSomado.reset_index(drop=True)
dfTimeSeriesRecoverSomado.reset_index(drop=True)
dfTimeSeriesDeathSomado.reset_index(drop=True)
dfTimeSeriesCasesSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesRecoverSomado.sort_values(['Name'], inplace=True)
dfTimeSeriesDeathSomado.sort_values(['Name'], inplace=True)
dfRegioesNew.sort_values(['Name'], inplace=True)
dfWorldMetersNew.sort_values(['Name'], inplace=True)
# Merge dataframe
dfFinalCases = pd.merge(dfTimeSeriesCasesSomado, dfRegioesNew, on="Name")
dfFinalCases.rename(columns={'WHO_Region': 'Regiao'}, inplace=True)
dfFinalRecover = pd.merge(dfTimeSeriesRecoverSomado, dfRegioesNew, on="Name")
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | -------------- -| ----------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
__author__ = "<NAME>"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import os
import numpy as np
import pandas as pd
#from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from enum import Enum
from pickle import dump, load
class FeatureType(Enum):
Divided = 1, # features are split into positions and other information
Mixed = 2, # all features are kept together
Positions = 3 # only the hit positions are used
class KindNormalization(Enum):
Scaling = 1,
Zscore = 2,
Polar = 3,
Nothing = 4
class Dataset():
def __init__(self, input_path, train_size, cylindrical, hits, kind_normalization, points_3d=True):
#np.set_printoptions(suppress=True)
# com index_col ja nao inclui a coluna index
dataframe = pd.read_csv(input_path, header=0, engine='python')
print("[Data] Data loaded from ", input_path)
self.kind = kind_normalization
if self.kind == KindNormalization.Scaling:
self.x_scaler = MinMaxScaler(feature_range=(-1, 1))
self.y_scaler = MinMaxScaler(feature_range=(-1, 1))
elif self.kind == KindNormalization.Zscore:
self.x_scaler = StandardScaler() # mean and standard deviation
self.y_scaler = StandardScaler() # mean and standard deviation
self.y_scaler_test = StandardScaler()
'''
if normalise:
data = self.scaler.fit_transform(dataframe.values)
data = pd.DataFrame(data, columns=columns)
else:
data = pd.DataFrame(dataframe.values, columns=columns)
'''
self.start_hits = 9
self.interval = 11
self.decimals = 4
self.data = dataframe.iloc[:, self.start_hits:]
#self.self = 0
if cylindrical:
self.coord_name = 'cylin'
else:
self.coord_name = 'xyz'
self.cylindrical = cylindrical
begin_coord = 0
end_coord = 0
begin_val = 10
end_val = 11
if self.cylindrical == False:
# with points_3d=True keep all three coordinates of each hit;
# otherwise keep only the last two (a 2D projection)
if points_3d:
begin_coord = 1
else:
begin_coord = 2
end_coord = 4
# cilyndrical coordinates
elif self.cylindrical == True:
if points_3d:
begin_coord = 4
else:
begin_coord = 5
end_coord = 7
begin_cols = [begin_coord+(self.interval*hit) for hit in range(0, hits)]
end_cols = [end_coord+(self.interval*hit) for hit in range(0, hits)]
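# Each hit occupies `self.interval` (11) columns in the CSV, so the coordinate
# block of hit h starts at begin_coord + 11*h and ends at end_coord + 11*h.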
new_df = pd.DataFrame()
for c in range(0,len(begin_cols)):
frame = self.data.iloc[:,np.r_[begin_cols[c]:end_cols[c]]]
new_df = pd.concat([new_df, frame], axis=1)
self.data = new_df
# we need to remove some rows to avoid problems
res = len(self.data) % 10
if res != 0:
# workaround: drop the trailing tracks so the row count is divisible by 10.
print('\t We have removed %s unuseful tracks. We believe you need to know. ' % res)
self.data = self.data.iloc[:-res,:]
i_split = int(len(self.data) * train_size)
self.data_train = self.data.iloc[0:i_split,0:]
self.data_test = self.data.iloc[i_split:,0:]
print("[Data] Data set shape ", self.data.shape)
print("[Data] Data train shape ", self.data_train.shape)
print("[Data] Data test shape ", self.data_test.shape)
print("[Data] Data coordinates ", self.coord_name)
print("[Data] Data normalization type ", self.kind)
def prepare_training_data(self, feature_type, normalise=True, cylindrical=False):
if not isinstance(feature_type, FeatureType):
raise TypeError('feature_type must be an instance of FeatureType Enum')
self.cylindrical = cylindrical
interval = self.interval
# x, y, z coordinates
if cylindrical == False:
bp=1
ep=4
bpC=10
epC=11
# cylindrical coordinates
elif cylindrical == True:
bp=4
ep=7
bpC=10
epC=11
df_hits_values = None
df_hits_positions = None
if feature_type==FeatureType.Divided:
# get hits positions p1(X1,Y1,Z1) p2(X2,Y2,Z2) p3(X3,Y3,Z3) p4(X4,Y4,Z4)
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bp+(interval*1):ep+(interval*1),
bp+(interval*2):ep+(interval*2),
bp+(interval*3):ep+(interval*3)]]
# get hits values p1(V1,V2,V3,V4)
df_hits_values = self.data.iloc[:, np.r_[
bpC:epC,
bpC+(interval*1):epC+(interval*1),
bpC+(interval*2):epC+(interval*2),
bpC+(interval*3):epC+(interval*3)]]
frames = [df_hits_positions, df_hits_values]
df_hits_positions = pd.concat(frames, axis=1)
if feature_type==FeatureType.Mixed:
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bpC:epC,
bp+(interval*1):ep+(interval*1), bpC+(interval*1):epC+(interval*1),
bp+(interval*2):ep+(interval*2), bpC+(interval*2):epC+(interval*2),
bp+(interval*3):ep+(interval*3), bpC+(interval*3):epC+(interval*3)]]
elif feature_type==FeatureType.Positions:
df_hits_positions = self.data.iloc[:, np.r_[
bp:ep,
bp+(interval*1):ep+(interval*1),
bp+(interval*2):ep+(interval*2),
bp+(interval*3):ep+(interval*3)]]
self.x_data = df_hits_positions
self.y_data = self.data.iloc[:, np.r_[bp+(interval*4):(bp+(interval*4)+3)]]
self.len = len(self.data)
xcolumns = self.x_data.columns
ycolumns = self.y_data.columns
# normalization just of features.
if normalise:
xscaled = self.x_scaler.fit_transform(self.x_data.values)
self.x_data = pd.DataFrame(xscaled, columns=xcolumns)
yscaled = self.y_scaler.fit_transform(self.y_data.values)
self.y_data = pd.DataFrame(yscaled, columns=ycolumns)
print("[Data] shape datas X: ", self.x_data.shape)
print("[Data] shape data y: ", self.y_data.shape)
print('[Data] len data total:', self.len)
#y_hit_info = self.getitem_by_hit(hit_id)
if feature_type==FeatureType.Divided:
# return x_data, y_data normalizated with data splited
return (self.x_data.iloc[:,0:12], self.x_data.iloc[:,-4:], self.y_data)
else:
# return x_data, y_data normalizated with no data splited
return (self.x_data, self.y_data)
def get_training_data(self, n_hit_in, n_hit_out, n_features, normalise=False):
'''
n_hit_in : number of input hits per window (e.g. 4)
n_hit_out : number of hits to predict (e.g. 1)
n_features : number of coordinates per hit (e.g. 3)
'''
X , Y = [],[]
sequences = self.data_train.values
rows = sequences.shape[0]
cols = sequences.shape[1]
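# Slide a window along each track (row): every n_hit_in*n_features consecutive
# values form one input sample and the following n_hit_out*n_features values
# form its target.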
for i in range(0, rows):
end_idx = 0
out_end_idx = 0
for j in range(0, cols, n_features):
end_ix = j + n_hit_in*n_features
out_end_idx = end_ix + n_hit_out*n_features
if out_end_idx > cols+1:
#print('corta ', out_end_idx)
break
#if i < 5:
# print('[%s,%s:%s][%s,%s:%s]' % (i, j, end_ix, i, end_ix, out_end_idx))
#seq_x, seq_y = sequences.iloc[i, j:end_ix], sequences.iloc[i, end_ix:out_end_idx]
seq_x, seq_y = sequences[i, j:end_ix], sequences[i, end_ix:out_end_idx]
X.append(seq_x)
Y.append(seq_y)
x_data, y_data = 0,0
# normalization just of features.
if normalise:
xscaled = self.x_scaler.fit_transform(X)
x_data = pd.DataFrame(xscaled)
yscaled = self.y_scaler.fit_transform(Y)
y_data = pd.DataFrame(yscaled)
#if save_params:
# self.save_scale_param()
else:
x_data = pd.DataFrame(X)
y_data = pd.DataFrame(Y)
#return pd.DataFrame(x_data).round(self.decimals) , pd.DataFrame(y_data).round(self.decimals)
return pd.DataFrame(x_data), pd.DataFrame(y_data)
# Import packages
import os
import pandas as pd
import scipy
from scipy.optimize import curve_fit
import hplib as hpl
# Functions
def import_heating_data():
# read in keymark data from *.txt files in /input/txt/
# save a dataframe to database_heating.csv in folder /output/
Modul = []
Manufacturer = []
Date = []
Refrigerant = []
Mass = []
Poff = []
Psb = []
Prated = []
SPLindoor = []
SPLoutdoor = []
Type = []
Climate = []
Guideline = []
T_in = []
T_out = []
P_th = []
COP = []
df = pd.DataFrame()
os.chdir('../')
root = os.getcwd()
Scanordner = (root + '/input/txt')
os.chdir(Scanordner)
Scan = os.scandir(os.getcwd())
with Scan as dir1:
for file in dir1:
with open(file, 'r', encoding='utf-8') as f:
contents = f.readlines()
date = 'NaN'
modul = 'NaN'
prated_low = 'NaN'
prated_medium = 'NaN'
heatpumpType = 'NaN'
refrigerant = 'NaN'
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
poff = 'NaN'
climate = 'NaN'
NumberOfTestsPerNorm = []
NumberOfTestsPerModule = []
i = 1 # index of the line currently being read
d = 0 # indicator if only the medium temperature is given
p = 0 # flag: whether a -15°C test point is present
date = contents[1]
date = date[61:]
if (date == '17 Dec 2020\n'):
date = '17.12.2020\n'
if (date == '18 Dec 2020\n'):
date = '18.12.2020\n'
if (date.startswith('5 Mar 2021')):
date = '05.03.2021\n'
if (date.startswith('15 Feb 2021')):
date = '15.02.2021\n'
if (date.startswith('22 Feb 2021')):
date = '22.02.2021\n'
for lines in contents:
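# Walk through the report line by line; in the keymark txt export each value sits
# a fixed number of lines after its label, so fields are read at offsets from the
# matching header line (contents[i], contents[i + 2], ...).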
i = i + 1
if (lines.startswith('Name\n') == 1):
manufacturer = (contents[i])
if (manufacturer.find('(') > 0):
manufacturer = manufacturer.split('(', 1)[1].split('\n')[0]
if manufacturer.endswith('GmbH\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.p.A.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.p.a.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.p.A\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.L.U.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.r.o.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.A.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('S.L.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('B.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('N.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('GmbH & Co KG\n'):
manufacturer = manufacturer[:-12]
elif manufacturer.startswith('NIBE'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Nibe'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Mitsubishi'):
manufacturer = 'Mitsubishi\n'
elif manufacturer.startswith('Ochsner'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('OCHSNER'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('Viessmann'):
manufacturer = 'Viessmann\n'
elif (lines.endswith('Date\n') == 1):
date = (contents[i])
if (date == 'basis\n'):
date = contents[i - 3]
date = date[14:]
elif (lines.startswith('Model') == 1):
modul = (contents[i - 2])
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
elif lines.endswith('Type\n'):
heatpumpType = contents[i][:-1]
if heatpumpType.startswith('A'):
heatpumpType = 'Outdoor Air/Water'
if heatpumpType.startswith('Eau glycol'):
heatpumpType = 'Brine/Water'
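# The blocks below repeat the same label/offset parsing of the sound power levels
# for the English, French, Spanish, Portuguese and Italian report variants.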
elif (lines.startswith('Sound power level indoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Sound power level outdoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Puissance acoustique extérieure')):
b = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica interna')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica externa')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines == 'Refrigerant\n'):
if (contents[i - 3] == 'Mass Of\n'):
continue
refrigerant = (contents[i])
elif (lines.startswith('Mass Of') == 1):
if (lines == 'Mass Of\n'):
mass = contents[i + 1]
elif (lines.endswith('kg\n') == 1):
mass = contents[i - 2]
mass = mass[20:]
else:
mass = contents[i]
elif lines.startswith('Average'):
climate = 'average'
elif lines.startswith('Cold'):
climate = 'cold'
elif lines.startswith('Warmer Climate'):
climate = 'warm'
elif (lines.startswith('EN') == 1):
if (p == 1):
Poff.append(poff)
Psb.append(psb)
if (p == 2):
Poff.append(poff)
Poff.append(poff)
Psb.append(psb)
Psb.append(psb_medium)
guideline = (contents[i - 2])
d = 0 # Medium or Low Content
p = 0 # -15 yes or no
NumberOfTestsPerNorm = []
if (contents[i - 1].startswith('Low') == 1):
d = 0
continue
if (contents[i - 1] == '\n'):
continue
if (contents[i - 1].startswith('Medium')):
d = 1
else:
d = 0
if lines.startswith('Prated'):
prated_low = contents[i][:-4]
if (contents[i + 2].endswith('kW\n')):
prated_medium = contents[i + 2][:-4]
elif (lines.startswith('Pdh Tj = -15°C') == 1): # check
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
minusfifteen_low = contents[i]
P_th.append(minusfifteen_low[:-4])
T_in.append('-15')
                            if d == 0:  # low temperature first, then medium temperature
if (climate == 'average'):
T_out.append('35')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('55')
elif (climate == 'cold'):
T_out.append('49')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_low)
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Guideline.append(guideline[:-1])
Climate.append(climate)
Type.append(heatpumpType)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
minusfifteen_medium = contents[i + 2]
P_th.append(minusfifteen_medium[:-4])
T_in.append('-15')
if (climate == 'average'):
T_out.append('55')
elif (climate == 'cold'):
T_out.append('49')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_medium)
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = -15°C')):
if (contents[i] == '\n'):
continue
if (contents[i].startswith('EHPA')):
continue
COP.append(contents[i][:-1])
NumberOfTestsPerModule.append(i)
p = 1
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerModule.append(i)
p = 2
elif (lines.startswith('Pdh Tj = -7°C') == 1): # check
minusseven_low = contents[i]
P_th.append(minusseven_low[:-4])
T_in.append('-7')
                        if d == 0:  # low temperature first, then medium temperature
if (climate == 'average'):
T_out.append('34')
elif (climate == 'cold'):
T_out.append('30')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('52')
elif (climate == 'cold'):
T_out.append('44')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_low)
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP') == 1):
continue
else:
minusseven_medium = contents[i + 2]
P_th.append(minusseven_medium[:-4])
T_in.append('-7')
if (climate == 'average'):
T_out.append('52')
elif (climate == 'cold'):
T_out.append('44')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = -7°C')):
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = +2°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
plustwo_low = contents[i]
P_th.append(plustwo_low[:-4])
T_in.append('2')
                            if d == 0:  # low temperature first, then medium temperature
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('27')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('42')
elif (climate == 'cold'):
T_out.append('37')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plustwo_medium = contents[i + 2]
# if(plustwo_low[:-1].endswith('kW')==0):#test
# print(plustwo_low[:-1])
# if(plustwo_medium[:-1].endswith('kW')==0):#test
# print(file.name)#plustwo_medium[:-1]
P_th.append(plustwo_medium[:-4])
T_in.append('2')
if (climate == 'average'):
T_out.append('42')
elif (climate == 'cold'):
T_out.append('37')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = +2°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
print(file.name)
continue
                        if (contents[i + 2].startswith('Warmer')):  # useless info
continue
                        if (contents[i] == 'n/a\n'):  # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = +7°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
plusseven_low = contents[i]
P_th.append(plusseven_low[:-4])
T_in.append('7')
                            if d == 0:  # low temperature first, then medium temperature
if (climate == 'average'):
T_out.append('27')
elif (climate == 'cold'):
T_out.append('25')
elif (climate == 'warm'):
T_out.append('31')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('36')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('46')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plusseven_medium = contents[i + 2]
P_th.append(plusseven_medium[:-4])
T_in.append('7')
if (climate == 'average'):
T_out.append('36')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('46')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = +7°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
continue
                        if (contents[i + 2].startswith('Warmer')):  # useless info
continue
                        if (contents[i] == 'n/a\n'):  # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = 12°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
if (contents[i].startswith('EHPA Secretariat') == 1):
plustwelfe_low = (contents[i - 11])
P_th.append(plustwelfe_low[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('24')
elif (climate == 'cold'):
T_out.append('24')
elif (climate == 'warm'):
T_out.append('26')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
plustwelfe_medium = (contents[i - 9])
P_th.append(plustwelfe_medium[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
else:
plustwelfe_low = contents[i]
P_th.append(plustwelfe_low[:-4])
T_in.append('12')
                            if d == 0:  # low temperature first, then medium temperature
if (climate == 'average'):
T_out.append('24')
elif (climate == 'cold'):
T_out.append('24')
elif (climate == 'warm'):
T_out.append('26')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
SPLoutdoor.append(sploutdoor_low)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plustwelfe_medium = contents[i + 2]
P_th.append(plustwelfe_medium[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = 12°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
print('W')
continue
                        if (contents[i + 2].startswith('Warmer')):  # useless info
continue
                        if (contents[i] == 'n/a\n'):  # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Poff')):
                        l = 0  # l flags whether Poff for medium temperature differs from Poff for low temperature
                        c = 2  # c is a counter used to alternate between the low and medium Poff values
poff = contents[i][:-2]
if poff.endswith(' '):
poff = poff[:-1]
if poff.endswith('.00'):
poff = poff[:-3]
second_poff = contents[i + 2][:-2]
if second_poff.endswith(' '):
second_poff = second_poff[:-1]
if second_poff.endswith('.00'):
second_poff = second_poff[:-3]
                        if (poff != second_poff):  # check whether Poff differs between low and medium temperature
if (contents[i + 2].endswith('W\n')):
if (contents[i + 2] != 'W\n'):
l = 1
for Tests in NumberOfTestsPerNorm:
if l == 0:
Poff.append(poff)
if l == 1:
c += 1
if c % 2 == 1:
Poff.append(poff)
if c % 2 == 0:
Poff.append(second_poff)
elif (lines.startswith('PSB')):
                        l = 0  # l flags whether PSB for medium temperature differs from PSB for low temperature
                        c = 2  # c is a counter used to alternate between the low and medium PSB values
psb = contents[i][:-2]
if psb.endswith(' '):
psb = psb[:-1]
if psb.endswith('.00'):
psb = psb[:-3]
psb_medium = contents[i + 2][:-2]
if psb_medium.endswith(' '):
psb_medium = psb_medium[:-1]
if psb_medium.endswith('.00'):
psb_medium = psb_medium[:-3]
                        if (psb != psb_medium):  # check whether PSB differs between low and medium temperature
if (contents[i + 2].endswith('W\n')):
if (contents[i + 2] != 'W\n'):
l = 1
for Tests in NumberOfTestsPerNorm:
if l == 0:
Psb.append(psb)
if l == 1:
c += 1
if c % 2 == 1:
Psb.append(psb)
if c % 2 == 0:
Psb.append(psb_medium)
if p == 1:
Poff.append(poff)
Psb.append(psb)
if p == 2:
Poff.append(poff)
Poff.append(second_poff)
Psb.append(psb)
Psb.append(psb_medium)
df['Manufacturer'] = Manufacturer
df['Model'] = Modul
df['Date'] = Date
df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y')
df['Type'] = Type
df['SPL indoor [dBA]'] = SPLindoor
df['SPL outdoor [dBA]'] = SPLoutdoor
df['Refrigerant'] = Refrigerant
df['Mass of Refrigerant [kg]'] = Mass
df['Poff [W]'] = Poff
df['Poff [W]'] = df['Poff [W]'].astype(int)
df['PSB [W]'] = Psb
df['PSB [W]'] = df['PSB [W]'].astype(int)
df['Prated [W]'] = Prated
df['Guideline'] = Guideline
df['Climate'] = Climate
df['T_in [°C]'] = T_in
df['T_in [°C]'] = df['T_in [°C]'].astype(int)
df['T_out [°C]'] = T_out
df['T_out [°C]'] = df['T_out [°C]'].astype(int)
"""
T_out for Low Temperature
T-in: -15 -7 2 7 12
Cold Climate 32 30 27 25 24
Average Climate 35 34 30 27 24
Warm Climate 35 35 35 31 26
T_out for Medium Temperature
T-in: -15 -7 2 7 12
Cold Climate 49 44 37 32 28
Average Climate 55 52 42 36 30
Warm Climate 55 55 55 46 34
"""
df['P_th [W]'] = P_th
df['P_th [W]'] = ((df['P_th [W]'].astype(float)) * 1000).astype(int)
df['COP'] = COP
df['COP'] = round(df['COP'].astype(float), 2)
df['P_el [W]'] = round(df['P_th [W]'] / df['COP'])
df['P_el [W]'] = df['P_el [W]'].fillna(0).astype(int)
df['PSB [W]'] = df['PSB [W]'].where(df['PSB [W]'] > df['Poff [W]'],
df['Poff [W]']) # Poff should not be bigger than PSB
df.drop(columns=['Poff [W]'], inplace=True) # not needed anymore
filt = df['P_th [W]'] < 50 # P_th too small
df.drop(index=df[filt].index, inplace=True)
# add T_amb and change T_in to right values
df['T_amb [°C]'] = df['T_in [°C]']
filt = df['Type'] == 'Brine/Water'
df.loc[filt, 'T_in [°C]'] = 0
filt = df['Type'] == 'Water/Water'
df.loc[filt, 'T_in [°C]'] = 10
df = df[
['Manufacturer', 'Model', 'Date', 'Type', 'Refrigerant', 'Mass of Refrigerant [kg]', 'PSB [W]', 'Prated [W]',
'SPL indoor [dBA]', 'SPL outdoor [dBA]', 'Climate', 'T_amb [°C]', 'T_in [°C]', 'T_out [°C]', 'P_th [W]',
'P_el [W]', 'COP']]
df.sort_values(by=['Manufacturer', 'Model'], inplace=True)
os.chdir("../")
df.to_csv(r'../output/database_heating.csv', index=False)
os.chdir('../src/')
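# The heating import above writes output/database_heating.csv with one row per model,
# climate and measured test point (T_in/T_out combination) holding P_th, P_el and COP.
# Illustrative post-processing chain (assumed call order, matching the file names used by the functions below):
#   reduce_heating_data('database_heating.csv', 'average')
#   normalize_heating_data('database_heating_average.csv')
#   identify_subtypes('database_heating_average_normalized.csv')
#   calculate_heating_parameters('database_heating_average_normalized_subtypes.csv')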
def import_cooling_data():
# read in keymark data from *.txt files in /input/txt/
    # save a dataframe to database_cooling.csv in folder /output/
Modul = []
Manufacturer = []
Date = []
Refrigerant = []
Mass = []
Type = []
Pdesignc = []
Temperatur = []
T_outside = []
PDC = []
EER = []
df = pd.DataFrame()
os.chdir('../')
root = os.getcwd()
Scanordner = (root + '/input/txt')
os.chdir(Scanordner)
Scan = os.scandir(os.getcwd())
with Scan as dir1:
for file in dir1:
with open(file, 'r', encoding='utf-8') as f:
contents = f.readlines()
T = 0
                i = 1  # indicator for the line which is read
date = contents[1]
date = date[61:]
if (date == '17 Dec 2020\n'):
date = '17.12.2020\n'
if (date == '18 Dec 2020\n'):
date = '18.12.2020\n'
if (date.startswith('5 Mar 2021')):
date = '05.03.2021\n'
if (date.startswith('15 Feb 2021')):
date = '15.02.2021\n'
if (date.startswith('22 Feb 2021')):
date = '22.02.2021\n'
for lines in contents:
i = i + 1
if (lines.startswith('Name\n') == 1):
manufacturer = (contents[i][:-1])
if (manufacturer.find('(') > 0):
manufacturer = manufacturer.split('(', 1)[1].split(')')[0]
elif manufacturer.startswith('NIBE'):
manufacturer = 'Nibe'
elif manufacturer.startswith('Nibe'):
manufacturer = 'Nibe'
elif manufacturer.startswith('Mitsubishi'):
manufacturer = 'Mitsubishi'
elif manufacturer.startswith('Ochsner'):
manufacturer = 'Ochsner'
elif manufacturer.startswith('OCHSNER'):
manufacturer = 'Ochsner'
elif manufacturer.startswith('Viessmann'):
manufacturer = 'Viessmann'
elif (lines.endswith('Date\n') == 1):
date = (contents[i])
if (date == 'basis\n'):
date = contents[i - 3]
date = date[14:]
elif (lines.startswith('Model') == 1):
modul = (contents[i - 2][7:-1])
temperatur2 = ''
elif lines.endswith('Type\n'):
heatpumpType = contents[i][:-1]
if heatpumpType.startswith('A'):
heatpumpType = 'Outdoor Air/Water'
if heatpumpType.startswith('Eau glycol'):
heatpumpType = 'Brine/Water'
elif (lines == 'Refrigerant\n'):
if (contents[i - 3] == 'Mass Of\n'):
continue
refrigerant = (contents[i][:-1])
elif (lines.startswith('Mass Of') == 1):
if (lines == 'Mass Of\n'):
mass = contents[i + 1][:-4]
elif (lines.endswith('kg\n') == 1):
mass = contents[i - 2]
mass = mass[20:-4]
else:
mass = contents[i][:-4]
elif lines.startswith('+'):
if T == 0:
temperatur1 = contents[i - 2][:-1]
if (contents[i].startswith('+')):
temperatur2 = contents[i][:-1]
T = 1
temperatur2 = (temperatur2[1:3])
temperatur1 = (temperatur1[1:2])
else:
T = 0
elif lines.startswith('Pdesignc'):
pdesignc1 = contents[i][:-4]
if temperatur2 != '':
pdesignc2 = contents[i + 2][:-4]
elif lines.startswith('Pdc Tj = 30°C'):
pdcT1_30 = contents[i][:-4]
if contents[i + 2].endswith('W\n'):
pdcT2_30 = contents[i + 2][:-4]
elif lines.startswith('EER Tj = 30°C'):
eerT1_30 = (contents[i][:-1])
EER.append(eerT1_30)
PDC.append(pdcT1_30)
T_outside.append('30')
Pdesignc.append(pdesignc1)
Temperatur.append(temperatur1)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
if temperatur2 != '':
eerT2_30 = contents[i + 2][:-1]
EER.append(eerT2_30)
PDC.append(pdcT2_30)
T_outside.append('30')
Pdesignc.append(pdesignc2)
Temperatur.append(temperatur2)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
elif lines.startswith('Pdc Tj = 35°C'):
pdcT1_35 = contents[i][:-4]
if contents[i + 2].endswith('W\n'):
pdcT2_35 = contents[i + 2][:-4]
elif lines.startswith('EER Tj = 35°C'):
eerT1_35 = (contents[i][:-1])
EER.append(eerT1_35)
PDC.append(pdcT1_35)
T_outside.append('35')
Pdesignc.append(pdesignc1)
Temperatur.append(temperatur1)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
if temperatur2 != '':
eerT2_35 = contents[i + 2][:-1]
EER.append(eerT2_35)
PDC.append(pdcT2_35)
T_outside.append('35')
Pdesignc.append(pdesignc2)
Temperatur.append(temperatur2)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
elif lines.startswith('Pdc Tj = 25°C'):
pdcT1_25 = contents[i][:-4]
if contents[i + 2].endswith('W\n'):
pdcT2_25 = contents[i + 2][:-4]
elif lines.startswith('EER Tj = 25°C'):
eerT1_25 = (contents[i][:-1])
EER.append(eerT1_25)
PDC.append(pdcT1_25)
T_outside.append('25')
Pdesignc.append(pdesignc1)
Temperatur.append(temperatur1)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
if temperatur2 != '':
eerT2_25 = contents[i + 2][:-1]
EER.append(eerT2_25)
PDC.append(pdcT2_25)
T_outside.append('25')
Pdesignc.append(pdesignc2)
Temperatur.append(temperatur2)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
elif lines.startswith('Pdc Tj = 20°C'):
pdcT1_20 = contents[i][:-4]
if contents[i + 2].endswith('W\n'):
pdcT2_20 = contents[i + 2][:-4]
elif lines.startswith('EER Tj = 20°C'):
eerT1_20 = (contents[i][:-1])
EER.append(eerT1_20)
PDC.append(pdcT1_20)
T_outside.append('20')
Pdesignc.append(pdesignc1)
Temperatur.append(temperatur1)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
if temperatur2 != '':
eerT2_20 = contents[i + 2][:-1]
EER.append(eerT2_20)
PDC.append(pdcT2_20)
T_outside.append('20')
Pdesignc.append(pdesignc2)
Temperatur.append(temperatur2)
Modul.append(modul)
Manufacturer.append(manufacturer)
Date.append(date)
Refrigerant.append(refrigerant)
Mass.append(mass)
Type.append(heatpumpType)
df['Manufacturer'] = Manufacturer
df['Model'] = Modul
df['Date'] = Date
df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y\n')
df['Type'] = Type
df['Refrigerant'] = Refrigerant
df['Mass of Refrigerant [kg]'] = Mass
df['Pdesignc'] = Pdesignc
df['T_outside [°C]'] = T_outside
df['T_out [°C]'] = Temperatur
df['Pdc [kW]'] = PDC
df['EER'] = EER
    filt = df['EER'] == 'Cdc'  # drop rows where a stray table label was parsed into the EER column
df.drop(index=df[filt].index, inplace=True)
    filt = df['EER'] == 'Pdc Tj = 30°C'  # drop rows where a stray table label was parsed into the EER column
df.drop(index=df[filt].index, inplace=True)
os.chdir("../..")
df.to_csv(os.getcwd() + r'/output/database_cooling.csv', index=False)
os.chdir("src")
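# output/database_cooling.csv holds one row per model, outdoor temperature (20-35 °C) and
# outlet temperature with the parsed Pdesignc, Pdc and EER values from the keymark PDFs.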
def reduce_heating_data(filename, climate):
# reduce the hplib_database_heating to a specific climate measurement series (average, warm, cold)
# delete redundant entries
# climate = average, warm or cold
df = pd.read_csv(r'../output/' + filename)
data_key = df.loc[df['Climate'] == climate]
delete = []
Models = data_key['Model'].values.tolist()
Models = list(dict.fromkeys(Models))
for model in Models:
Modeldf = data_key.loc[data_key['Model'] == model, :]
        if Modeldf.shape[0] != 8:  # models with more or fewer than 8 data points are deleted
delete.append('delete')
else:
delete.append('keep')
deletemodels = pd.DataFrame()
deletemodels['delete'] = delete
deletemodels['Model'] = Models
data_key = data_key.merge(deletemodels, how='inner', on='Model')
data_key = data_key.loc[data_key['delete'] == 'keep']
data_key.drop(columns=['delete'], inplace=True)
data_key.to_csv(r'../output/database_heating_' + climate + '.csv', index=False)
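# Example (illustrative): reduce_heating_data('database_heating.csv', 'average')
# keeps only the average-climate rows of models with exactly 8 test points and
# writes them to output/database_heating_average.csv.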
def normalize_heating_data(filename):
data_key = pd.read_csv(r'../output/' + filename) # read Dataframe of all models
Models = data_key['Model'].values.tolist()
Models = list(dict.fromkeys(Models))
new_df = pd.DataFrame()
for model in Models:
data_key = pd.read_csv(r'../output/' + filename) # read Dataframe of all models
data = data_key.loc[((data_key['Model'] == model) & (
data_key['T_out [°C]'] == 52))] # only use data of model and ref point -7/52
Pel_ref = data['P_el [W]'].array[0] # ref Point Pel
Pth_ref = data['P_th [W]'].array[0] # ref Point Pth
data_key = data_key.loc[data_key['Model'] == model] # only use data of model
data_key.loc[:, ['P_th_n']] = data_key['P_th [W]'] / Pth_ref # get normalized Value P_th_n
data_key.loc[:, ['P_el_n']] = data_key['P_el [W]'] / Pel_ref # get normalized Value P_el_n
new_df = pd.concat([new_df, data_key]) # merge new Dataframe with old one
filt1 = (new_df['P_th_n'] >= 2) & (new_df['T_out [°C]'] == 34)
deletemodels = new_df.loc[filt1, ['Model']].values.tolist()
for model in deletemodels:
new_df = new_df.loc[new_df['Model'] != model[0]]
new_df.to_csv(r'../output/' + filename[:-4] + '_normalized.csv', encoding='utf-8', index=False)
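# Normalization reference point is -7 °C ambient / 52 °C outlet: P_th_n and P_el_n are the
# measured values divided by P_th and P_el at that point, so both equal 1.0 there.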
def get_subtype(P_th_minus7_34, P_th_2_30, P_th_7_27, P_th_12_24):
if (P_th_minus7_34 <= P_th_2_30):
if (P_th_2_30 <= P_th_7_27):
if (P_th_7_27 <= P_th_12_24):
modus = 'On-Off'
else:
modus = 'Regulated' # Inverter, 2-Stages, etc.
else:
modus = 'Regulated' # Inverter, 2-Stages, etc.
else:
modus = 'Regulated' # Inverter, 2-Stages, etc.
return modus
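# Example (illustrative values):
#   get_subtype(4.0, 4.5, 5.0, 5.5) -> 'On-Off'     (P_th rises monotonically towards milder conditions)
#   get_subtype(5.0, 4.5, 4.0, 3.5) -> 'Regulated'  (P_th is highest at the coldest test point)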
def identify_subtypes(filename):
# Identify Subtype like On-Off or Regulated by comparing the thermal Power output at different temperature levels:
# -7/34 | 2/30 | 7/27 | 12/24
    # assumption for On-Off heat pumps: the larger the temperature difference, the smaller the thermal power output
    # assumption for Regulated heat pumps (inverter, 2-stage, etc.): everything else
data_key = pd.read_csv(r'../output/' + filename) # read Dataframe of all models
Models = data_key['Model'].values.tolist()
Models = list(dict.fromkeys(Models))
data_keymark = data_key.rename(
columns={'P_el [W]': 'P_el', 'P_th [W]': 'P_th', 'T_in [°C]': 'T_in', 'T_out [°C]': 'T_out'})
data_keymark['deltaT'] = data_keymark['T_out'] - data_keymark['T_in']
Subtypelist = []
for model in Models:
try:
P_thermal = []
filt1 = data_keymark['T_out'] == 34
Tin_minus_seven = data_keymark.loc[filt1]
filt2 = Tin_minus_seven['Model'] == model
Model_minus_seven = Tin_minus_seven[filt2]
P_th_minus_seven = Model_minus_seven['P_th'].array[0]
P_thermal.append(P_th_minus_seven)
filt1 = data_keymark['T_out'] == 30
T_in_plus_two = data_keymark.loc[filt1]
filt2 = T_in_plus_two['Model'] == model
Model_plus_two = T_in_plus_two[filt2]
P_th_plus_two = Model_plus_two['P_th'].array[0]
P_thermal.append(P_th_plus_two)
filt1 = data_keymark['T_out'] == 27
Tin_plus_seven = data_keymark.loc[filt1]
filt2 = Tin_plus_seven['Model'] == model
Model_plus_seven = Tin_plus_seven[filt2]
P_th_plus_seven = Model_plus_seven['P_th'].array[0]
P_thermal.append(P_th_plus_seven)
filt1 = data_keymark['T_out'] == 24
Tin_plus_twelfe = data_keymark.loc[filt1]
filt2 = Tin_plus_twelfe['Model'] == model
Model_plus_twelfe = Tin_plus_twelfe[filt2]
P_th_plus_twelfe = Model_plus_twelfe['P_th'].array[0]
P_thermal.append(P_th_plus_twelfe)
Modus = get_subtype(P_thermal[0], P_thermal[1], P_thermal[2], P_thermal[3])
except:
print(model)
Subtypelist.append(Modus)
Subtype_df = pd.DataFrame()
Subtype_df['Model'] = Models
Subtype_df['Subtype'] = Subtypelist
data_key = pd.read_csv(r'../output/' + filename) # read Dataframe of all models
data_key = data_key.merge(Subtype_df, how='inner', on='Model')
##assign group:
filt1 = (data_key['Type'] == 'Outdoor Air/Water') & (data_key['Subtype'] == 'Regulated')
data_key.loc[filt1, 'Group'] = 1
filt1 = (data_key['Type'] == 'Exhaust Air/Water') & (data_key['Subtype'] == 'Regulated')
data_key.loc[filt1, 'Group'] = 7
filt1 = (data_key['Type'] == 'Brine/Water') & (data_key['Subtype'] == 'Regulated')
data_key.loc[filt1, 'Group'] = 2
filt1 = (data_key['Type'] == 'Water/Water') & (data_key['Subtype'] == 'Regulated')
data_key.loc[filt1, 'Group'] = 3
filt1 = (data_key['Type'] == 'Outdoor Air/Water') & (data_key['Subtype'] == 'On-Off')
data_key.loc[filt1, 'Group'] = 4
filt1 = (data_key['Type'] == 'Exhaust Air/Water') & (data_key['Subtype'] == 'On-Off')
data_key.loc[filt1, 'Group'] = 7
filt1 = (data_key['Type'] == 'Brine/Water') & (data_key['Subtype'] == 'On-Off')
data_key.loc[filt1, 'Group'] = 5
filt1 = (data_key['Type'] == 'Water/Water') & (data_key['Subtype'] == 'On-Off')
data_key.loc[filt1, 'Group'] = 6
data_key = data_key[
['Manufacturer', 'Model', 'Date', 'Type', 'Subtype', 'Group', 'Refrigerant', 'Mass of Refrigerant [kg]',
'SPL indoor [dBA]', 'SPL outdoor [dBA]', 'PSB [W]', 'Climate', 'T_amb [°C]', 'T_in [°C]', 'T_out [°C]',
'P_th [W]', 'P_el [W]', 'COP', 'P_th_n', 'P_el_n']]
filt1 = data_key['Group'] != 7
data_key = data_key.loc[filt1]
data_key.to_csv(r'../output/' + filename[:-4] + '_subtypes.csv', encoding='utf-8', index=False)
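# Group numbering used above:
#   1: Outdoor Air/Water, Regulated    4: Outdoor Air/Water, On-Off
#   2: Brine/Water, Regulated          5: Brine/Water, On-Off
#   3: Water/Water, Regulated          6: Water/Water, On-Off
#   7: Exhaust Air/Water (any subtype) - filtered out before saving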
def fit_simple(w, x, y, z):
p0 = [0.1, 0.001, 0.1, 1.] # starting values
a = (w, x, y, z)
para, _ = scipy.optimize.leastsq(func_simple_zero, p0, args=a)
return para
def func_simple_zero(para, w, x, y, z):
k1, k2, k3, k4 = para
z_calc = k1 * w + k2 * x + k3 + k4 * y
z_diff = z_calc - z
return z_diff
def func_simple(para, w, x, y):
    # Function to calculate z from the fitted parameters for any w, x and y:
k1, k2, k3, k4 = para
z = k1 * w + k2 * x + k3 + k4 * y
return z
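# Example (illustrative, for an air/water model where T_in equals the ambient temperature):
#   para = fit_simple(data['T_in'], data['T_out'], data['T_amb'], data['P_el_n'])
#   p_el_n_ref = func_simple(para, -7, 52, -7)   # normalized electrical power at -7 °C / 52 °C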
def calculate_heating_parameters(filename):
# Calculate function parameters from normalized values
data_key = pd.read_csv('../output/' + filename)
Models = data_key['Model'].values.tolist()
Models = list(dict.fromkeys(Models)) # get models
Group = []
Pel_ref = []
Pth_ref = []
p1_P_th = []
p2_P_th = []
p3_P_th = []
p4_P_th = []
p1_P_el = []
p2_P_el = []
p3_P_el = []
p4_P_el = []
p1_COP = []
p2_COP = []
p3_COP = []
p4_COP = []
for model in Models:
data_key = pd.read_csv('../output/' + filename)
data_key = data_key.rename(
columns={'P_el [W]': 'P_el', 'P_th [W]': 'P_th', 'T_in [°C]': 'T_in', 'T_out [°C]': 'T_out',
'T_amb [°C]': 'T_amb'})
data_key = data_key.loc[data_key['Model'] == model] # get data of model
group = data_key.Group.array[0] # get Group of model
        if group > 1 and group != 4:  # add a second set of data points shifted by +1 °C for brine/water and water/water models
data_key1 = data_key.loc[data_key['Model'] == model]
data_key1['T_in'] = data_key1['T_in'] + 1
data_key1['T_out'] = data_key1['T_out'] + 1
data_key = pd.concat([data_key, data_key1])
Pel_REF = data_key.loc[data_key['P_el_n'] == 1, ['P_el']].values.tolist()[0][0]
Pth_REF = data_key.loc[data_key['P_th_n'] == 1, ['P_th']].values.tolist()[0][0]
data_key.fillna(0, inplace=True)
if group == 1 or group == 2 or group == 3:
data = data_key.loc[((data_key['T_amb'] != 12) & (data_key['T_amb'] != 7))]
P_el_n_para_key = fit_simple(data['T_in'], data['T_out'], data['T_amb'], data['P_el_n'])
P_th_n_para_key = fit_simple(data_key['T_in'], data_key['T_out'], data_key['T_amb'], data_key['P_th_n'])
COP_para_key = fit_simple(data_key['T_in'], data_key['T_out'], data_key['T_amb'], data_key['COP'])
else:
P_el_n_para_key = fit_simple(data_key['T_in'], data_key['T_out'], data_key['T_amb'], data_key['P_el_n'])
P_th_n_para_key = fit_simple(data_key['T_in'], data_key['T_out'], data_key['T_amb'], data_key['P_th_n'])
COP_para_key = fit_simple(data_key['T_in'], data_key['T_out'], data_key['T_amb'], data_key['COP'])
# write Parameters in List
p1_P_th.append(P_th_n_para_key[0])
p2_P_th.append(P_th_n_para_key[1])
p3_P_th.append(P_th_n_para_key[2])
p4_P_th.append(P_th_n_para_key[3])
p1_P_el.append(P_el_n_para_key[0])
p2_P_el.append(P_el_n_para_key[1])
p3_P_el.append(P_el_n_para_key[2])
p4_P_el.append(P_el_n_para_key[3])
p1_COP.append(COP_para_key[0])
p2_COP.append(COP_para_key[1])
p3_COP.append(COP_para_key[2])
p4_COP.append(COP_para_key[3])
Group.append(group)
Pel_ref.append(Pel_REF)
Pth_ref.append(Pth_REF)
# write List in Dataframe
paradf = pd.DataFrame()
paradf['Model'] = Models
paradf['p1_P_th [1/°C]'] = p1_P_th
paradf['p2_P_th [1/°C]'] = p2_P_th
paradf['p3_P_th [-]'] = p3_P_th
paradf['p4_P_th [1/°C]'] = p4_P_th
paradf['p1_P_el_h [1/°C]'] = p1_P_el
paradf['p2_P_el_h [1/°C]'] = p2_P_el
paradf['p3_P_el_h [-]'] = p3_P_el
paradf['p4_P_el_h [1/°C]'] = p4_P_el
paradf['p1_COP [-]'] = p1_COP
paradf['p2_COP [-]'] = p2_COP
paradf['p3_COP [-]'] = p3_COP
paradf['p4_COP [-]'] = p4_COP
paradf['Group'] = Group
paradf['P_el_ref'] = Pel_ref
paradf['P_th_ref'] = Pth_ref
para = paradf
key = pd.read_csv('../output/' + filename)
key = key.loc[key['T_out [°C]'] == 52]
parakey = para.merge(key, how='left', on='Model')
parakey = parakey.rename(columns={'Group_x': 'Group', 'P_el_ref': 'P_el_h_ref [W]', 'P_th_ref': 'P_th_h_ref [W]'})
parakey['COP_ref'] = parakey['P_th_h_ref [W]'] / parakey['P_el_h_ref [W]']
table = parakey[
['Manufacturer', 'Model', 'Date', 'Type', 'Subtype', 'Group', 'Refrigerant', 'Mass of Refrigerant [kg]',
'SPL indoor [dBA]', 'SPL outdoor [dBA]', 'PSB [W]', 'Climate', 'P_el_h_ref [W]', 'P_th_h_ref [W]', 'COP_ref',
'p1_P_th [1/°C]', 'p2_P_th [1/°C]', 'p3_P_th [-]', 'p4_P_th [1/°C]', 'p1_P_el_h [1/°C]', 'p2_P_el_h [1/°C]',
'p3_P_el_h [-]', 'p4_P_el_h [1/°C]', 'p1_COP [-]', 'p2_COP [-]', 'p3_COP [-]', 'p4_COP [-]']]
table.to_csv('hplib_database.csv', encoding='utf-8', index=False)
table.to_csv('../output/hplib_database_heating.csv', encoding='utf-8', index=False)
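# The fitted model is value = p1*T_in + p2*T_out + p3 + p4*T_amb (applied to P_th_n, P_el_n and COP);
# the parameters plus the -7/52 reference powers are written to hplib_database.csv and
# output/hplib_database_heating.csv.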
def validation_relative_error_heating():
# Simulate every set point for every heat pump and save csv file
df=pd.read_csv('../output/database_heating_average_normalized_subtypes.csv')
i=0
prev_model='first Model'
while i<len(df):
Model=df.iloc[i,1]
T_amb=df.iloc[i,12]
T_in=df.iloc[i,13]
T_out=df.iloc[i,14]
P_th=df.iloc[i,15]
P_el=df.iloc[i,16]
COP=df.iloc[i,17]
try:
if prev_model!=Model:
para=hpl.get_parameters(Model)
results=hpl.simulate(T_in,T_out-5,para,T_amb)
df.loc[i,'P_th_sim']=results.P_th[0]
df.loc[i,'P_el_sim']=results.P_el[0]
df.loc[i,'COP_sim']=results.COP[0]
prev_model=Model
i=i+1
except:
i=i+1
pass
# Relative error (RE) for every set point
df['RE_P_th']=(df['P_th_sim']/df['P_th [W]']-1)*100
df['RE_P_el']=(df['P_el_sim']/df['P_el [W]']-1)*100
df['RE_COP']=(df['COP_sim']/df['COP']-1)*100
df.to_csv('../output/database_heating_average_normalized_subtypes_validation.csv', encoding='utf-8', index=False)
def validation_mape_heating():
    # calculate the mean absolute percentage error (MAPE) for every heat pump and save it in hplib_database_heating.csv
df=pd.read_csv('../output/database_heating_average_normalized_subtypes_validation.csv')
para=pd.read_csv('../output/hplib_database_heating.csv', delimiter=',')
para=para.loc[para['Model']!='Generic']
Models = para['Model'].values.tolist()
Models = list(dict.fromkeys(Models))
mape_cop=[]
mape_pel=[]
mape_pth=[]
for model in Models:
df_model=df.loc[df['Model']==model]
mape_pth.append((((df_model['P_th [W]']-df_model['P_th_sim']).abs())/df_model['P_th [W]']*100).mean())
mape_pel.append((((df_model['P_el [W]']-df_model['P_el_sim']).abs())/df_model['P_el [W]']*100).mean())
mape_cop.append((((df_model['COP']-df_model['COP_sim']).abs())/df_model['COP']*100).mean())
para['MAPE_P_el']=mape_pel
para['MAPE_COP']=mape_cop
para['MAPE_P_th']=mape_pth
para.to_csv('../output/hplib_database_heating.csv', encoding='utf-8', index=False)
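# MAPE [%] = mean(|measured - simulated| / measured) * 100, computed per model
# and separately for P_th, P_el and COP.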
def add_generic():
data_key = pd.read_csv('hplib_database.csv', delimiter=',')
data_key = data_key.loc[data_key['Model'] != 'Generic']
Groups = [1, 2, 3, 4, 5, 6]
for group in Groups:
if group == 1:
Type = 'Outdoor Air/Water'
modus = 'Regulated'
elif group == 2:
Type = 'Brine/Water'
modus = 'Regulated'
elif group == 3:
Type = 'Water/Water'
modus = 'Regulated'
elif group == 4:
Type = 'Outdoor Air/Water'
modus = 'On-Off'
elif group == 5:
Type = 'Brine/Water'
modus = 'On-Off'
elif group == 6:
Type = 'Water/Water'
modus = 'On-Off'
Group1 = data_key.loc[data_key['Group'] == group]
Group1=Group1.loc[Group1['MAPE_P_el']<=25]
p1_P_th_average = pd.unique(Group1['p1_P_th [1/°C]']).mean(0)
p2_P_th_average = pd.unique(Group1['p2_P_th [1/°C]']).mean(0)
p3_P_th_average = pd.unique(Group1['p3_P_th [-]']).mean(0)
p4_P_th_average = pd.unique(Group1['p4_P_th [1/°C]']).mean(0)
p1_P_el_average = pd.unique(Group1['p1_P_el_h [1/°C]']).mean(0)
p2_P_el_average = pd.unique(Group1['p2_P_el_h [1/°C]']).mean(0)
p3_P_el_average = pd.unique(Group1['p3_P_el_h [-]']).mean(0)
p4_P_el_average = pd.unique(Group1['p4_P_el_h [1/°C]']).mean(0)
p1_COP_average = pd.unique(Group1['p1_COP [-]']).mean(0)
p2_COP_average = pd.unique(Group1['p2_COP [-]']).mean(0)
p3_COP_average = pd.unique(Group1['p3_COP [-]']).mean(0)
p4_COP_average = pd.unique(Group1['p4_COP [-]']).mean(0)
p1_Pdc_average = Group1['p1_Pdc [1/°C]'].mean(0)
p2_Pdc_average = Group1['p2_Pdc [1/°C]'].mean(0)
p3_Pdc_average = Group1['p3_Pdc [-]'].mean(0)
p4_Pdc_average = Group1['p4_Pdc [1/°C]'].mean(0)
p5_P_el_average = Group1['p1_P_el_c [1/°C]'].mean(0)
p6_P_el_average = Group1['p2_P_el_c [1/°C]'].mean(0)
p7_P_el_average = Group1['p3_P_el_c [-]'].mean(0)
p8_P_el_average = Group1['p4_P_el_c [1/°C]'].mean(0)
p1_EER_average = Group1['p1_EER [-]'].mean(0)
p2_EER_average = Group1['p2_EER [-]'].mean(0)
p3_EER_average = Group1['p3_EER [-]'].mean(0)
p4_EER_average = Group1['p4_EER [-]'].mean(0)
if group == 1 or group == 4:
COP_ref = -7 * p1_COP_average + 52 * p2_COP_average + p3_COP_average - 7 * p4_COP_average
elif group == 2 or group == 5:
COP_ref = 0 * p1_COP_average + 52 * p2_COP_average + p3_COP_average - 7 * p4_COP_average
elif group == 3 or group == 6:
COP_ref = 10 * p1_COP_average + 52 * p2_COP_average + p3_COP_average - 7 * p4_COP_average
data_key.loc[len(data_key.index)] = ['Generic', 'Generic', '', Type, modus, group, '', '', '', '', '',
'average', '', '', COP_ref,'', '', p1_P_th_average, p2_P_th_average,
p3_P_th_average, p4_P_th_average, p1_P_el_average, p2_P_el_average,
p3_P_el_average, p4_P_el_average, p1_COP_average, p2_COP_average,
p3_COP_average, p4_COP_average, '', '', '',
p1_Pdc_average, p2_Pdc_average, p3_Pdc_average, p4_Pdc_average,
p5_P_el_average,p6_P_el_average ,p7_P_el_average ,p8_P_el_average ,
p1_EER_average,p2_EER_average ,p3_EER_average ,p4_EER_average,
'', '', '']
data_key['COP_ref'] = data_key['COP_ref'].round(2)
data_key.to_csv('hplib_database.csv', encoding='utf-8', index=False)
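# The 'Generic' rows are built per group from the mean of the fitted parameters of all models
# in that group with MAPE_P_el <= 25 %; COP_ref is evaluated at the group's reference inlet
# temperature (-7, 0 or 10 °C), 52 °C outlet and -7 °C ambient.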
def reduce_to_unique():
# Many heat pump models have several entries
# because of different controller or storage configurations.
# Reduce to unique heat pump models.
df = pd.read_csv('../output/hplib_database_heating.csv', delimiter=',')
df_cool=pd.read_csv('../output/database_cooling.csv')
cooling_Models=df_cool['Model'].unique()
Models = []
unique_values = pd.unique(df['p3_P_el_h [-]']).tolist()
for values in unique_values:
modelnames = df.loc[df['p3_P_el_h [-]'] == values, ['Model']]
for model in (modelnames.Model.values):
for cooling_model in cooling_Models:
if model==cooling_model:
modelnames.Model.values[0]=model
Models.append(modelnames.Model.values[0])
new_df = pd.DataFrame()
new_df1 = | pd.DataFrame() | pandas.DataFrame |
"""
Initial population
======
This module generates initial population for the genetic algorithm.
"""
from BOFdat.util.update import _import_csv_file,_import_base_biomass,_import_model,_import_essentiality
from BOFdat.util.update import _get_biomass_objective_function, determine_coefficients
import warnings
import random
from random import shuffle, randint
import cobra
import pandas as pd
import numpy as np
from itertools import repeat
from deap import creator, base, tools
from deap.tools import History, HallOfFame
from cobra import Reaction
from cobra.flux_analysis import single_gene_deletion
from cobra.util.solver import linear_reaction_coefficients
from sklearn.metrics import matthews_corrcoef
# Parallel
import multiprocessing
# Timeout imports and definitions
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
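# Simple container for one candidate biomass composition handled by the genetic algorithm:
# a name, the metabolite-to-coefficient dictionary and a flag for whether the model remained
# solvable with that objective (attribute roles inferred from their names and usage in BOFdat).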
class Individual:
biomass_name = ''
biomass = {}
solvability = True
"""
DEPRECATED --> FUNCTIONS MOVED TO BOFdat.util.update
def _get_biomass_objective_function(model):
from cobra.util.solver import linear_reaction_coefficients
return list(linear_reaction_coefficients(model).keys())[0]
def _import_model(path_to_model):
extension = path_to_model.split('.')[-1]
if extension == 'json':
return cobra.io.load_json_model(path_to_model)
elif extension == 'xml':
return cobra.io.read_sbml_model(path_to_model)
else:
raise Exception('Model format not compatible, provide xml or json')
def _import_csv_file(path):
csv_file = pd.read_csv(path)
# 1- Verify number of columns
if len(csv_file.columns) > 2:
raise Exception("Your file format is not appropriate, more than 2 columns")
# 2- Verify presence of header
if type(csv_file.iloc[0:0, 0]) == str and type(csv_file.iloc[0:0, 1]) == str:
csv_file = csv_file.iloc[1:]
# 3- Remove null data
if csv_file.isnull().values.any():
csv_file = csv_file.dropna()
return csv_file
def _import_base_biomass(path):
two_col_df = _import_csv_file(path)
metabolites = [str(i) for i in two_col_df.iloc[0:, 0]]
coefficients = [float(i) for i in two_col_df.iloc[0:, 1]]
base_biomass_df = pd.DataFrame({'Metabolites':metabolites,'Coefficients':coefficients},
columns=['Metabolites','Coefficients'])
return base_biomass_df
DEPRECATED --> function not used anymore
def _make_metab_ind(m,metab_index):
# Generates an individual with metabolites
ind_dict = {}
for i in metab_index:
if i.id == m.id:
ind_dict[i.id] = 1
else:
ind_dict[i.id] = 0
return ind_dict
"""
def _branching_analysis(model):
metab, number_of_rxn = [], []
for m in model.metabolites:
metab.append(m.id)
number_of_rxn.append(len(m.reactions))
branching_df = | pd.DataFrame({'Metab': metab, 'Number of metab': number_of_rxn}) | pandas.DataFrame |
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import numpy as np
import yfinance as yf
from datetime import datetime, date
import matplotlib.pyplot as plt
import talib
#import ta
import numpy as np
import matplotlib.ticker as mticker
import pandas as pd
import requests
from bs4 import BeautifulSoup as soup
from urllib.request import Request, urlopen
import plotly.graph_objects as go
yf.pdr_override()
from pytrends.request import TrendReq
import nltk
nltk.downloader.download('vader_lexicon')
import time
from finvizfinance.quote import finvizfinance
def user_input_features():
today = date.today()
ticker = st.sidebar.text_input("Ticker", 'AAPL')
start_date = st.sidebar.text_input("Start Date", '2021-01-01')
end_date = st.sidebar.text_input("End Date", f'{today}')
return ticker, start_date, end_date
def get_symbol(symbol):
try:
stock = finvizfinance(symbol)
company_name = stock.ticker_fundament()
com = list(company_name.values())[0]
return com
#url = "http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
#result = requests.get(url).json()
#for x in result['ResultSet']['Result']:
#if x['symbol'] == symbol:
#return x['name']
except Exception as e:
return e
def get_fundamentals(symbol):
try:
#symbol, start, end = user_input_features()
# ##Fundamentals
url2 = ("http://finviz.com/quote.ashx?t=" + symbol.lower())
req = Request(url2, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
html = soup(webpage, "html.parser")
# Find fundamentals table
fundamentals = pd.read_html(str(html), attrs = {'class': 'snapshot-table2'})[0]
# Clean up fundamentals dataframe
fundamentals.columns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']
colOne = []
colLength = len(fundamentals)
for k in np.arange(0, colLength, 2):
colOne.append(fundamentals[f'{k}'])
attrs = | pd.concat(colOne, ignore_index=True) | pandas.concat |
"""
oil price data source: https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf
"""
import pandas as pd
import numpy as np
import tabula
import requests
import plotly.express as px
import plotly.graph_objects as go
import time
from pandas.tseries.offsets import MonthEnd
import re
import xmltodict
def process_table(table_df):
print("processing the downloaded PDF from PPAC website.")
cols = ['Date', 'Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol',
'Date_D', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']
table_df.columns = cols
table_df.drop(table_df.index[[0,3]],inplace=True)
table_df.drop('Date_D',axis=1,inplace=True)
table_df.dropna(how='any',inplace=True)
table_df = table_df.astype(str)
table_df = table_df.apply(lambda x: x.str.replace(" ", ""))
table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']] = table_df[['Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol', 'Kolkata_Petrol', 'Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']].astype(float)
table_df['Date'] = pd.to_datetime(table_df['Date'])
table_petrol = table_df[['Date','Delhi_Petrol', 'Mumbai_Petrol', 'Chennai_Petrol','Kolkata_Petrol']]
table_diesel = table_df[['Date','Delhi_Diesel', 'Mumbai_Diesel','Chennai_Diesel', 'Kolkata_Diesel']]
new_cols = [i.replace("_Petrol", "") for i in list(table_petrol.columns)]
table_petrol.columns = new_cols
table_diesel.columns = new_cols
return table_petrol, table_diesel
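# process_table returns two frames (petrol, diesel) sharing the columns
# Date / Delhi / Mumbai / Chennai / Kolkata, one row per published date.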
def get_international_exchange_rates(start_date,end_date):
print("sending request for international exchange rates.")
exchange_dates_url = "https://api.exchangeratesapi.io/history?"
params = {"start_at": start_date, "end_at":end_date, "base":"USD", "symbols":"INR"}
try:
req = requests.get(exchange_dates_url,params=params)
except Exception as e:
print(e)
print("request failed. using the saved data.")
dollar_exchange_rates = pd.read_csv("dollar_exhange_rates.csv")
        dollar_exchange_rates['Date'] = pd.to_datetime(dollar_exchange_rates['Date'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('Date').sort_index(ascending=False)
return dollar_exchange_rates
else:
print("request successful. processing the data.")
dollar_exchange_rates = pd.DataFrame(req.json()['rates']).T.reset_index()
dollar_exchange_rates['index'] = pd.to_datetime(dollar_exchange_rates['index'])
        dollar_exchange_rates = dollar_exchange_rates.set_index('index').sort_index(ascending=False)
dollar_exchange_rates.to_csv("dollar_exhange_rates.csv")
return dollar_exchange_rates
# def merge_data(dollar_exchange_rates, international_oil_prices, oil_price_data):
# print("merging the international oil price data, international exchange rate data and domestic oil price data.")
# trim_int = international_oil_prices.loc[international_oil_prices.index.isin(oil_price_data.index)].dropna()
# oil_price_data = oil_price_data.merge(trim_int, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data = oil_price_data.merge(dollar_exchange_rates, left_index=True, right_index=True).sort_index(ascending=False)
# oil_price_data['INR'] = oil_price_data['INR'].round(2)
# oil_price_data['INR_pc'] = (((oil_price_data['INR'] - oil_price_data['INR'].iloc[-1])/oil_price_data['INR'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude'] = (oil_price_data['Price'] / 159) * oil_price_data['INR']
# oil_price_data['int_pc'] = (((oil_price_data['Price'] - oil_price_data['Price'].iloc[-1])/oil_price_data['Price'].iloc[-1])*100).round(2)
# oil_price_data['rup_lit_crude_pc'] = (((oil_price_data['rup_lit_crude'] - oil_price_data['rup_lit_crude'].iloc[-1])/oil_price_data['rup_lit_crude'].iloc[-1])*100).round(2)
# return oil_price_data
def download_ppac():
print("sending request for domestic oil price data from PPAC website.")
ppac_url = r"https://www.ppac.gov.in/WriteReadData/userfiles/file/PP_9_a_DailyPriceMSHSD_Metro.pdf"
try:
req = requests.get(ppac_url)
except Exception as e:
print(e)
print("Request unsuccessful. The saved file will be used.")
else:
with open('DATA/price_data.pdf', 'wb') as file:
file.write(req.content)
print('file saved successfully.')
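# The downloaded PDF is stored at DATA/price_data.pdf and parsed page by page with
# tabula in prepare_downloaded_file() below.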
def prepare_downloaded_file():
print("preparing downloaded file for analysis.")
oil_prices = 'DATA/price_data.pdf'
tables = tabula.read_pdf(oil_prices, pages="all")
proc_dfs = [process_table(i) for i in tables]
petrol_df = pd.concat(i[0] for i in proc_dfs)
diesel_df = pd.concat(i[1] for i in proc_dfs)
print(f"Success. Length of Petrol prices {len(petrol_df)}------ diesel prices {len(diesel_df)}")
petrol_df['mean_price'] = (petrol_df['Delhi']+petrol_df['Mumbai']+petrol_df['Chennai']+petrol_df['Kolkata'])/4
diesel_df['mean_price'] = (diesel_df['Delhi']+diesel_df['Mumbai']+diesel_df['Chennai']+diesel_df['Kolkata'])/4
print("Adding percent change columns")
for i in petrol_df.columns[1:]:
petrol_df[f'{i}_pc'] = (((petrol_df[i] - petrol_df[i].iloc[-1])/petrol_df[i].iloc[-1]) * 100).round(2)
for i in diesel_df.columns[1:]:
diesel_df[f'{i}_pc'] = (((diesel_df[i] - diesel_df[i].iloc[-1])/diesel_df[i].iloc[-1]) * 100).round(2)
petrol_df.set_index("Date",inplace=True)
diesel_df.set_index("Date",inplace=True)
return petrol_df, diesel_df
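# Example (illustrative):
#   download_ppac()
#   petrol_df, diesel_df = prepare_downloaded_file()   # Date-indexed city prices plus mean_price and *_pc percent-change columns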
def prep_consumption_df(consumption_df,year):
consumption_df.reset_index(inplace=True)
consumption_df.dropna(how='any',inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
melt_df.sort_values('products',inplace=True)
melt_df = melt_df.reset_index().drop('index',axis=1)
melt_df['year'] = year
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['average_cons'] = melt_df['average_cons'].astype(float).round(2)
return melt_df
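# The source tables run over the Indian fiscal year (April-March), so January-March readings
# are shifted to the following calendar year before the long-format frame is returned.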
def prep_consumption_df_present(consumption_df,year):
    consumption_df = consumption_df.reset_index(drop=True)  # reassign: reset_index() returns a copy, so the chained in-place drop had no effect
consumption_df.drop(consumption_df.index[range(0,6)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
print(consumption_df)
consumption_df.drop(consumption_df.index[range(14,20)],inplace=True)
consumption_df.reset_index(inplace=True)
consumption_df.drop('index',axis=1,inplace=True)
#print(consumption_df)
cols = ['products', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
consumption_df.drop(consumption_df.index[0],inplace=True)
consumption_df.columns = cols
consumption_df = consumption_df.loc[(consumption_df['products']=='MS')|(consumption_df['products']=='HSD')].reset_index().drop(['index','Total'],axis=1)
melt_df = pd.melt(consumption_df, id_vars = 'products',var_name='month',value_name='average_cons')
melt_df.sort_values('products',inplace=True)
melt_df = melt_df.reset_index().drop('index',axis=1)
melt_df['year'] = year
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['average_cons'] = melt_df['average_cons'].astype(float).round(2)
return melt_df
def prep_historical_crude(hist_crude_df,year):
cols = ['year', 'April','May','June','July','August','September','October','November','December','January','February','March','Average','Ratio']
hist_crude_df = hist_crude_df.dropna(how='any').reset_index().drop('index',axis=1)
hist_crude_df.columns = cols
hist_crude_df.drop(hist_crude_df.index[0],inplace=True)
hist_crude_df.drop(['Average','Ratio'],axis=1,inplace=True)
melt_df = pd.melt(hist_crude_df, id_vars = 'year',var_name='month',value_name='import_bbl_usd')
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df['import_bbl_usd'] = melt_df['import_bbl_usd'].astype(float).round(2)
melt_df = melt_df.loc[melt_df['year']>=year].sort_values(['year','month']).reset_index().drop('index',axis=1)
return melt_df
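# Illustrative sketch (added for clarity, values are made up): every prep_* helper in this
# script reshapes a wide month-per-column table into long format with pd.melt, exactly as
# done above. The toy frame below only demonstrates that wide-to-long step.
_melt_demo = pd.melt(
    pd.DataFrame({'year': ['2017-18'], 'April': [52.5], 'May': [50.1]}),
    id_vars='year', var_name='month', value_name='import_bbl_usd')
assert list(_melt_demo.columns) == ['year', 'month', 'import_bbl_usd']
assert len(_melt_demo) == 2  # one row per month column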
def prep_current_crude(current_crude_df):
current_crude_df.drop(current_crude_df.index[[i for i in range(0,12)]],inplace=True)
current_crude_df.reset_index(inplace=True)
current_crude_df.drop('index',inplace=True,axis=1)
current_crude_df.drop(current_crude_df.index[[2,3,4]],inplace=True)
cols = ['year', 'April','May','June','July','August','September','October','November','December','January','February','March']
current_crude_df.columns = cols
current_crude_df.drop(current_crude_df.index[0],inplace=True)
melt_df = pd.melt(current_crude_df, id_vars = 'year',var_name='month',value_name='import_bbl_usd')
melt_df['year'] = melt_df['year'].apply(lambda x: x.split('-')[0]).astype(int)
melt_df['year'] = np.where((melt_df['month'].isin(['January','February','March'])),melt_df['year']+1,melt_df['year'])
melt_df.dropna(inplace=True,how='any')
melt_df['import_bbl_usd'] = melt_df['import_bbl_usd'].astype(float).round(2)
return melt_df
def prep_historical_import(historical_import_df, year):
cols = ['product', 'April','May','June','July','August','September','October','November','December','January','February','March','Total']
print(historical_import_df)
historical_import_df.dropna(how='all',inplace=True,axis=1)
historical_import_df.columns = cols
historical_import_df = historical_import_df.dropna(how='any').reset_index().drop('index',axis=1)
historical_import_df = historical_import_df.loc[historical_import_df['product'].str.contains('import oil|ms|hsd|total',flags=re.I)].reset_index()
historical_import_df.drop('index',axis=1,inplace=True)
historical_import_df = historical_import_df[:4]
historical_import_df = historical_import_df.melt(id_vars='product',var_name='month',value_name='import_rs_cr')
historical_import_df['sheetname'] = year
historical_import_df['year'] = historical_import_df['sheetname'].str.extract("(\d+)").astype(int)
historical_import_df['year'] = np.where((historical_import_df['month'].isin(['January','February','March'])),historical_import_df['year']+1,historical_import_df['year'])
historical_import_df.drop('sheetname',axis=1,inplace=True)
return historical_import_df
def get_opec_crude(dmin):
opec_url = "https://www.opec.org/basket/basketDayArchives.xml"
req = requests.get(opec_url)
xml_dict = xmltodict.parse(req.content)
opec_df = pd.DataFrame(xml_dict['Basket']['BasketList'],columns=['Date','Price'])
opec_df['Date'] = pd.to_datetime(opec_df["Date"])
return opec_df.loc[opec_df['Date']>=dmin]
#Downloading the PDF file from PPAC website, saving the file and returning the final dataframes.
# if __name__ == "__main__"
download_ppac()
petrol_df, diesel_df = prepare_downloaded_file()
# int_oil_prices = pd.read_csv(r'oil-prices-master\oil-prices-master\data\brent-daily.csv')
# print(f'loaded international oil prices. Length {len(int_oil_prices)}')
# int_oil_prices["Date"] = pd.to_datetime(int_oil_prices['Date'])
# int_oil_prices.set_index("Date",inplace=True)
# Saving the merged petrol and diesel data
petrol_df['Type'], diesel_df['Type'] = 'Petrol', 'Diesel'
merged_price_data = pd.concat([petrol_df, diesel_df])
merged_price_data.to_csv('price_df_merged.csv')
#Getting the international exchange rates.
start_date = str(petrol_df.index.min())[:10]
end_date = str(petrol_df.index.max())[:10]
dollar_exchange_rates = get_international_exchange_rates(start_date, end_date)
dollar_exchange_rates.set_index('index',inplace=True)
month_avg_dol = dollar_exchange_rates.resample('M').mean()
month_avg_dol['month'] = month_avg_dol.index.month_name()
month_avg_dol['year'] = month_avg_dol.index.year
#creating merged dataframes for international section analysis.
# petrol_df_merged = merge_data(dollar_exchange_rates, int_oil_prices, petrol_df)
# diesel_df_merged = merge_data(dollar_exchange_rates, int_oil_prices, diesel_df)
#loading the monthly average crude prices dataset
consumption_dict = pd.read_excel("DATA/consumption_historical_original.xls", sheet_name=["2017-18","2018-19","2019-20"])
consumption_hist = pd.concat([prep_consumption_df(df,year) for year,df in consumption_dict.items()]).reset_index()
consumption_hist.drop('index',axis=1,inplace=True)
consumption_present = pd.read_excel("DATA/PT_consumption.xls")
consumption_present = prep_consumption_df_present(consumption_present,"2020-21")
consumption_data = pd.concat([consumption_present,consumption_hist]).reset_index().drop('index',axis=1)
consumption_data['merge_col'] = consumption_data['year'].astype(str) + " " +consumption_data['month']
consumption_data['type'] = np.where((consumption_data['products']=='MS'),'Petrol','Diesel')
#1 metric ton = 1210 liters for diesel
#1 metric ton Petrol (MS) = 1411 litres of petrol [http://petroleum.nic.in/sites/default/files/readyrecknor_Oct14.pdf]
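# Hedged illustration (added, numbers are hypothetical): applying the conversion factors
# above, consumption reported in metric tonnes becomes litres by multiplying with
# 1411 L/MT for petrol (MS) and 1210 L/MT for diesel (HSD).
_demo_cons_mt = pd.Series([2000.0, 6500.0])             # e.g. MS, HSD consumption in MT
_demo_litres = _demo_cons_mt * pd.Series([1411, 1210])  # MS and HSD factors respectively
assert _demo_litres.tolist() == [2000.0 * 1411, 6500.0 * 1210]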
#Handling the historical crude and current crude files. Returns a dataframe with total imports in price_rs_billion_lit
crude_import_bbl = pd.read_excel("DATA/historical_crude_bbl.xls")
crude_import_bbl = prep_historical_crude(crude_import_bbl,2017)
current_crude_bbl = pd.read_excel("DATA/current_crude_bbl.xls")
current_crude_bbl = prep_current_crude(current_crude_bbl)
month_avg_dol['merge_col'] = month_avg_dol['year'].astype(str) + " " +month_avg_dol['month']
crude_bbl = pd.concat([crude_import_bbl, current_crude_bbl]).reset_index().drop('index',axis=1)
crude_bbl['merge_col'] = crude_bbl['year'].astype(str) + " " +crude_bbl['month']
crude_bbl = crude_bbl.merge(month_avg_dol)
crude_bbl['INR'] = crude_bbl['INR'].round(2)
crude_bbl['price_rs_lit_crude'] = crude_bbl['import_bbl_usd'] * crude_bbl['INR'] / 159 #calculating price in Rs per litre from dollars per barrel
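# Worked example (added, inputs are hypothetical): one barrel is roughly 159 litres, so a
# crude price of 80 USD/bbl at 75 INR/USD converts to about 37.74 Rs per litre.
assert round(80 * 75 / 159, 2) == 37.74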
#Handling the historical and current crude import data
req_sheets = ['PT_Import_Val_Rs 2019-20', 'PT_Import_Val_2018-19', 'PT_Import_Val_2017-18']
crude_import = pd.read_excel('DATA/import_historical_original.xls',sheet_name=req_sheets)
crude_import = pd.concat([prep_historical_import(i,filename) for filename,i in crude_import.items()]).reset_index()
crude_import.drop('index',axis=1,inplace=True)
#Calculating monthly average prices for petrol and diesel
petrol_monthly_average = petrol_df.resample('M').mean()
to_drop = [i for i in petrol_monthly_average if 'pc' in i]
petrol_monthly_average.drop(to_drop, inplace=True,axis=1)
petrol_monthly_average['month'] = petrol_monthly_average.index.month_name()
petrol_monthly_average['year'] = petrol_monthly_average.index.year
petrol_monthly_average['merge_col'] = petrol_monthly_average['year'].astype(str) + " " + petrol_monthly_average['month']
petrol_monthly_average = petrol_monthly_average.merge(crude_bbl)
petrol_monthly_average['type'] = 'Petrol'
petrol_monthly_average['Date'] = pd.to_datetime(petrol_monthly_average['merge_col'])+MonthEnd(1)
diesel_monthly_average = diesel_df.resample('M').mean()
to_drop = [i for i in diesel_monthly_average if 'pc' in i]
diesel_monthly_average.drop(to_drop, inplace=True,axis=1)
diesel_monthly_average['month'] = diesel_monthly_average.index.month_name()
diesel_monthly_average['year'] = diesel_monthly_average.index.year
diesel_monthly_average['merge_col'] = diesel_monthly_average['year'].astype(str) + " " + diesel_monthly_average['month']
diesel_monthly_average = diesel_monthly_average.merge(crude_bbl)
diesel_monthly_average['type'] = 'Diesel'
diesel_monthly_average['Date'] = pd.to_datetime(diesel_monthly_average['merge_col'])+MonthEnd(1)
#creating merge_col to merge with the monthly average crude oil import price data
merged_monthly_average = | pd.concat([petrol_monthly_average,diesel_monthly_average]) | pandas.concat |
# -*- coding: utf-8 -*-
import copy
import os
import shutil
from builtins import range
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
import featuretools as ft
from featuretools import variable_types
from featuretools.entityset import EntitySet, Relationship
from featuretools.tests import integration_data
@pytest.fixture()
def entityset():
return make_ecommerce_entityset()
def test_operations_invalidate_metadata(entityset):
new_es = ft.EntitySet(id="test")
# test metadata gets created on access
assert new_es._metadata is None
assert new_es.metadata is not None # generated after access
assert new_es._metadata is not None
new_es.entity_from_dataframe("customers",
entityset["customers"].df,
index=entityset["customers"].index)
new_es.entity_from_dataframe("sessions",
entityset["sessions"].df,
index=entityset["sessions"].index)
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
r = ft.Relationship(new_es["customers"]["id"],
new_es["sessions"]["customer_id"])
new_es = new_es.add_relationship(r)
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es = new_es.normalize_entity("customers", "cohort", "cohort")
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es.add_last_time_indexes()
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es.add_interesting_values()
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
def test_reset_metadata(entityset):
assert entityset.metadata is not None
assert entityset._metadata is not None
entityset.reset_metadata()
assert entityset._metadata is None
def test_cannot_readd_relationships_that_already_exists(entityset):
before_len = len(entityset.relationships)
entityset.add_relationship(entityset.relationships[0])
after_len = len(entityset.relationships)
assert before_len == after_len
def test_add_relationships_convert_type(entityset):
for r in entityset.relationships:
parent_e = entityset[r.parent_entity.id]
child_e = entityset[r.child_entity.id]
assert type(r.parent_variable) == variable_types.Index
assert type(r.child_variable) == variable_types.Id
assert parent_e.df[r.parent_variable.id].dtype == child_e.df[r.child_variable.id].dtype
def test_add_relationship_errors_on_dtype_mismatch(entityset):
log_2_df = entityset['log'].df.copy()
log_variable_types = {
'id': variable_types.Categorical,
'session_id': variable_types.Id,
'product_id': variable_types.Id,
'datetime': variable_types.Datetime,
'value': variable_types.Numeric,
'value_2': variable_types.Numeric,
'latlong': variable_types.LatLong,
'latlong2': variable_types.LatLong,
'value_many_nans': variable_types.Numeric,
'priority_level': variable_types.Ordinal,
'purchased': variable_types.Boolean,
'comments': variable_types.Text
}
entityset.entity_from_dataframe(entity_id='log2',
dataframe=log_2_df,
index='id',
variable_types=log_variable_types,
time_index='datetime',
encoding='utf-8')
error_text = u'Unable to add relationship because id in customers is Pandas dtype category and session_id in log2 is Pandas dtype int64.'
with pytest.raises(ValueError, match=error_text):
mismatch = Relationship(entityset[u'customers']['id'], entityset['log2']['session_id'])
entityset.add_relationship(mismatch)
def test_query_by_id(entityset):
df = entityset['log'].query_by_values(instance_vals=[0])
assert df['id'].values[0] == 0
def test_query_by_id_with_time(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2, 3, 4],
time_last=datetime(2011, 4, 9, 10, 30, 2 * 6))
assert df['id'].get_values().tolist() == [0, 1, 2]
def test_get_forward_entities_deep(entityset):
entities = entityset.get_forward_entities('log', 'deep')
assert entities == set(['sessions', 'customers', 'products', u'régions', 'cohorts'])
def test_query_by_variable_with_time(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0))
true_values = [
i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
assert df['id'].get_values().tolist() == list(range(10))
assert df['value'].get_values().tolist() == true_values
def test_query_by_variable_with_training_window(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0),
training_window='15m')
assert df['id'].get_values().tolist() == [9]
assert df['value'].get_values().tolist() == [0]
def test_query_by_indexed_variable(entityset):
df = entityset['log'].query_by_values(
instance_vals=['taco clock'],
variable_id='product_id')
assert df['id'].get_values().tolist() == [15, 16]
def test_check_variables_and_dataframe():
# matches
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, index='id',
variable_types=vtypes)
assert entityset.entity_dict['test_entity'].variable_types['category'] == variable_types.Categorical
def test_make_index_variable_ordering():
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
assert entityset.entity_dict['test_entity'].df.columns[0] == 'id1'
def test_extra_variable_type():
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical,
'category2': variable_types.Categorical}
error_text = "Variable ID category2 not in DataFrame"
with pytest.raises(LookupError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
def test_add_parent_not_index_variable(entityset):
error_text = "Parent variable.*is not the index of entity Entity.*"
with pytest.raises(AttributeError, match=error_text):
entityset.add_relationship(Relationship(entityset[u'régions']['language'],
entityset['customers'][u'région_id']))
def test_unknown_index():
# more variables
df = pd.DataFrame({'category': ['a', 'b', 'a']})
vtypes = {'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].df['id'].tolist() == list(range(3))
def test_doesnt_remake_index():
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
error_text = "Cannot make index: index variable already present"
with pytest.raises(RuntimeError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
make_index=True,
dataframe=df)
def test_bad_time_index_variable():
df = pd.DataFrame({'category': ['a', 'b', 'a']})
error_text = "Time index not found in dataframe"
with pytest.raises(LookupError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index="id",
dataframe=df,
time_index='time')
def test_converts_variable_types_on_init():
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'category_int': [1, 2, 3],
'ints': ['1', '2', '3'],
'floats': ['1', '2', '3.0']})
df["category_int"] = df["category_int"].astype("category")
vtypes = {'id': variable_types.Categorical,
'ints': variable_types.Numeric,
'floats': variable_types.Numeric}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
entity_df = entityset['test_entity'].df
assert entity_df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
assert entity_df['floats'].dtype.name in variable_types.PandasTypes._pandas_numerics
    # this is inferred from the pandas dtype
e = entityset["test_entity"]
assert isinstance(e['category_int'], variable_types.Categorical)
def test_converts_variable_type_after_init():
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'ints': ['1', '2', '1']})
df["category"] = df["category"].astype("category")
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
dataframe=df)
e = entityset['test_entity']
df = entityset['test_entity'].df
e.convert_variable_type('ints', variable_types.Numeric)
assert isinstance(e['ints'], variable_types.Numeric)
assert df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
e.convert_variable_type('ints', variable_types.Categorical)
assert isinstance(e['ints'], variable_types.Categorical)
e.convert_variable_type('ints', variable_types.Ordinal)
assert isinstance(e['ints'], variable_types.Ordinal)
e.convert_variable_type('ints', variable_types.Boolean,
true_val=1, false_val=2)
assert isinstance(e['ints'], variable_types.Boolean)
assert df['ints'].dtype.name == 'bool'
def test_converts_datetime():
# string converts to datetime correctly
    # This test fails without defining vtypes; otherwise the EntitySet
    # infers the time column as a numeric type
times = pd.date_range('1/1/2011', periods=3, freq='H')
time_strs = times.strftime('%Y-%m-%d')
df = pd.DataFrame({'id': [0, 1, 2], 'time': time_strs})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
time_index="time", variable_types=vtypes,
dataframe=df)
pd_col = entityset['test_entity'].df['time']
# assert type(entityset['test_entity']['time']) == variable_types.Datetime
assert type(pd_col[0]) == pd.Timestamp
def test_handles_datetime_format():
# check if we load according to the format string
    # pass in an ambiguous date
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
time_strs = [actual.strftime(datetime_format)] * 3
df = pd.DataFrame(
{'id': [0, 1, 2], 'time_format': time_strs, 'time_no_format': time_strs})
vtypes = {'id': variable_types.Categorical,
'time_format': (variable_types.Datetime, {"format": datetime_format}),
'time_no_format': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
col_format = entityset['test_entity'].df['time_format']
col_no_format = entityset['test_entity'].df['time_no_format']
# without formatting pandas gets it wrong
assert (col_no_format != actual).all()
# with formatting we correctly get jan2
assert (col_format == actual).all()
def test_handles_datetime_mismatch():
# can't convert arbitrary strings
df = pd.DataFrame({'id': [0, 1, 2], 'time': ['a', 'b', 'tomorrow']})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
error_text = "Given date string not likely a datetime."
with pytest.raises(ValueError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, 'id',
time_index='time', variable_types=vtypes)
def test_entity_init(entityset):
# Note: to convert the time column directly either the variable type
    # or convert_date_columns must be specified
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6]})
vtypes = {'time': variable_types.Datetime}
entityset.entity_from_dataframe('test_entity', df, index='id',
time_index='time', variable_types=vtypes)
assert entityset['test_entity'].df.shape == df.shape
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].time_index == 'time'
assert set([v.id for v in entityset['test_entity'].variables]) == set(df.columns)
assert entityset['test_entity'].df['time'].dtype == df['time'].dtype
assert set(entityset['test_entity'].df['id']) == set(df['id'])
def test_nonstr_column_names():
df = | pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 3: ['a', 'b', 'c']}) | pandas.DataFrame |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1), # n=-2 -> roll back two weeks. Tue
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_( | BQuarterEnd(startingMonth=1) | pandas.core.datetools.BQuarterEnd |
import pandas as pd
from telethon import TelegramClient, sync, events
import json
import re
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
api_id = 'your api_id'
api_hash = 'your api hash'
group_username = 'fradpro'  # it's temporarily off
client = TelegramClient('session_name', api_id, api_hash)
client.start()
@client.on(events.NewMessage(chats=group_username, incoming=True))
async def my_event_handler(event):
chats = await client.get_messages(group_username, 1)
message =[]
time = []
if len(chats):
for chat in chats:
message.append(chat.message)
time.append(chat.date)
data = {'time':time,'message':message}
df = | pd.DataFrame(data) | pandas.DataFrame |
import csv
import sys
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import json
from os import listdir
from os.path import isfile, join
import re
monnomdistances={'C':0,'I':0,'D':1,'J':1,'K':2,'L':1,'M':2,'S':1,'T':2}
markersize=8
linewidth=3
markerstyles = {'MonNom':{'color':'#000000', 'symbol':'x','size':markersize+2},
'Nom':{'color':'#00B050', 'symbol':'cross','size':markersize},
'Proxied Grift':{'color':'#ED7D31', 'symbol':'arrow-up','size':markersize},
'Monotonic Grift':{'color':'#ED7D31', 'symbol':'diamond-open','size':markersize},
'Racket':{'color':'#4472C4', 'symbol':'circle-open','size':markersize},
'C#':{'color':'#264478', 'symbol':'diamond','size':markersize},
'Java':{'color':'#7030A0', 'symbol':'diamond-wide','size':markersize+3},
'NodeJS':{'color':'#9E480E', 'symbol':'circle','size':markersize},
'HiggsCheck':{'color':'#C00000', 'symbol':'arrow-up','size':markersize},
'Reticulated':{'color':'#B21E6F', 'symbol':'circle-open','size':markersize}}
linestyles = {'MonNom':{'color':'#000000', 'width':linewidth},
'Nom':{'color':'#00aa00', 'dash':'dash', 'width':linewidth},
'Proxied Grift':{'color':'#ED7D31', 'dash':'longdash', 'width':linewidth},
'Monotonic Grift':{'color':'#ED7D31', 'dash':'dashdot', 'width':linewidth},
'Racket':{'color':'#4472C4', 'dash':'dot', 'width':linewidth},
'C#':{'color':'#264478', 'dash':'dot', 'width':linewidth},
'Java':{'color':'#7030A0', 'dash':'dot', 'width':linewidth},
'NodeJS':{'color':'#9E480E', 'dash':'dot', 'width':linewidth},
'HiggsCheck':{'color':'#C00000', 'dash':'dot', 'width':linewidth},
'Reticulated':{'color':'#B21E6F', 'dash':'dot', 'width':linewidth}}
def distance_to_fully_typed(config):
ret=0
for c in config:
ret+=monnomdistances[c]
return ret
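# Quick illustrative check (added; the configuration strings are hypothetical): the
# distance is just the sum of the per-character weights in monnomdistances above.
assert distance_to_fully_typed("CC") == 0   # fully typed/nominal
assert distance_to_fully_typed("KM") == 4   # 'K' -> 2, 'M' -> 2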
def combine_funcs(f1,f2):
return lambda x: f2(f1(x))
def cut_dotbm(str):
return str[4:]
def fetch_key(key,data):
try:
if(key.isdigit()):
return data.loc[int(key)][0]
else:
return data.loc[key][0]
except KeyError:
return "REMOVE"
def load_converter(path):
data=pd.read_csv(path,index_col=0)
return lambda k: fetch_key(str(k),data)
def check_key(key,data):
try:
if(key.isdigit()):
return data.loc[int(key)][0] is None
else:
return data.loc[key][0] is None
except KeyError:
return True
def load_skipper(path,results):
data=pd.read_csv(path,index_col=0)
actualdata=pd.read_csv(results,header=None)
return lambda i: check_key(cut_dotbm(actualdata.iat[i,0]),data)
def load_benchmark(path):
config=json.load(open(path+"/plotconfig.json","rt"))
if(config.get("version")!=None):
if(config.get("version")=="v2"):
return load_newbenchmark(path,config)
if(config.get("version")=="v3"):
return load_benchmarkv3(path,config)
if(config.get("version")=="v1"):
return load_oldbenchmark(path,config)
return load_newbenchmark(path,config)
def load_newbenchmark(path,config):
converter=cut_dotbm
skipper=lambda x : False
if(config.get("mapping")!=None):
converter=combine_funcs(cut_dotbm,load_converter(path+"/"+config["mapping"]))
skipper=load_skipper(path+"/"+config["mapping"],path+"/results.csv")
data=pd.read_csv(path+"/results.csv",header=None,converters={0:converter},skiprows=skipper,index_col=0)
datacolumns=len(data.columns)
linesperprog=config["lines"]
resultcolumns=[[] for i in range(0,linesperprog-1)]
timescolumns=[]
if(datacolumns%linesperprog!=0):
raise Exception("Invalid number of columns: "+path)
rightvalues=[]
for i in range(0,linesperprog):
if i!=config["time"]:
rightvalues.append(data.iat[0,i])
for i in range(0,datacolumns):
if i%linesperprog==config["time"]:
timescolumns.append(i)
else:
if i%linesperprog<config["time"]:
resultcolumns[i%linesperprog].append(i)
else:
resultcolumns[(i-1)%linesperprog].append(i)
for i in range(0,linesperprog-1):
if not data.take(resultcolumns[i],axis=1).applymap(lambda x : x==rightvalues[i]).all(axis=None):
print(data.take(resultcolumns[i],axis=1))
raise Exception("not all result values match!")
times=data.take(timescolumns,axis=1)
times=times.rename(columns={0:'Configuration'})
dists=pd.Series(times.index.map(distance_to_fully_typed), name='Distance to Fully Typed/Nominal')
means=times.mean(axis=1,numeric_only=True).rename("Running Time in Seconds")
stdevs=times.std(axis=1,numeric_only=True).rename("Running Time Standard Deviation")
extended=pd.concat([pd.Series(times.index),dists],join="inner",axis=1)
extended=extended.set_index([0])
dtable= | pd.DataFrame(extended) | pandas.DataFrame |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
( | TS('2015-01-05') | pandas.Timestamp |
from bs4 import BeautifulSoup as Bt4
import requests
import json
import datetime
import pandas as pd
import urllib
import time
import re
import random
import platform
from datetime import datetime
import platform
import shutil
from lxml import etree
# ---------- Scrape the trunk-route (main-line) schedule reliability index ----------
# Shanghai Shipping Exchange index page URL
website = f"https://www.sse.net.cn/index/singleIndex?indexType=gcspi"
source = requests.get(website).text
soup = Bt4(source, "lxml")
form = soup.find_all('tbody')
upper_form = form[0].find_all('td')
man_index = [] # trunk-route schedule reliability indices (composite, port service, cargo service)
for num in range(4,11,3):
man_index.append(float(upper_form[num].get_text()))
lower_form = form[1].find_all('td')
arrive_harbor = [] # arrival/departure port service schedule reliability
logistic = [] # cargo receipt/delivery service schedule reliability
#從第8個開始到58個 step:5
for num in range(8,58,5):
arrive_harbor.append(float(lower_form[num].get_text()))
for num in range(10,60,5):
logistic.append(float(lower_form[num].get_text()))
col1 = ['綜合準班率指數(%)',
'到離港服務準班率指數(%)',
'收发貨服務準班率指數(%)']
col2 = ['亞洲-歐洲',
'亞洲-地中海-到',
'亞洲-美西-到',
'亞洲-美東-到',
'亞洲-波斯灣-到',
'亞洲-澳新-到',
'亞洲-西非-到',
'亞洲-南非-到',
'亞洲-南美-到',
'歐洲-美東-到']
col3 = ['亞洲-歐洲-收',
'亞洲-地中海-收',
'亞洲-美西-收',
'亞洲-美東-收',
'亞洲-波斯灣-收',
'亞洲-澳新-收',
'亞洲-西非-收',
'亞洲-南非-收',
'亞洲-南美-收',
'歐洲-美東-收']
main_index = pd.DataFrame({0:man_index})
main_index = main_index.T
main_index.columns = col1
arrive_harbor = pd.DataFrame({0:arrive_harbor})
arrive_harbor = arrive_harbor.T
arrive_harbor.columns = col2
logistic = | pd.DataFrame({0:logistic}) | pandas.DataFrame |
# Copyright 2019 <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Plot Service will make use of appropriately decorated functions in this module.
import datetime
import logging
import re
import time
from collections import namedtuple
from enum import auto
from numbers import Real
from dateutil import tz
import cachetools.func
import numpy as np
import pandas as pd
from pandas import Series
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, USMemorialDay, USLaborDay, USThanksgivingDay, \
nearest_workday
from gs_quant.api.gs.assets import GsIdType
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.fields import Fields
from gs_quant.datetime.gscalendar import GsCalendar
from gs_quant.datetime.point import relative_days_add
from gs_quant.errors import MqTypeError, MqValueError
from gs_quant.markets.securities import *
from gs_quant.markets.securities import Asset, AssetIdentifier, SecurityMaster
from gs_quant.target.common import AssetClass, FieldFilterMap, AssetType, Currency
from gs_quant.timeseries.helper import log_return, plot_measure
GENERIC_DATE = Union[datetime.date, str]
TD_ONE = datetime.timedelta(days=1)
_logger = logging.getLogger(__name__)
MeasureDependency: namedtuple = namedtuple("MeasureDependency", ["id_provider", "query_type"])
# TODO: get NERC Calendar from SecDB
class NercCalendar(AbstractHolidayCalendar):
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
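# Illustrative usage (added sketch): AbstractHolidayCalendar subclasses expose holidays(),
# so the observed NERC dates for a year can be listed directly; 2019 yields one date per
# rule above.
_nerc_2019 = NercCalendar().holidays(start='2019-01-01', end='2019-12-31')
assert len(_nerc_2019) == 6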
def _to_fx_strikes(strikes):
out = []
for strike in strikes:
if strike == 50:
out.append('ATMS')
elif strike < 50:
out.append(f'{round(strike)}DC')
else:
out.append(f'{round(abs(100 - strike))}DP')
return out
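# Quick illustrative check (added): delta-call strikes map onto FX quoting conventions,
# with 50 treated as at-the-money spot.
assert _to_fx_strikes([75, 50, 25]) == ['25DP', 'ATMS', '25DC']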
class SkewReference(Enum):
DELTA = 'delta'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
DELTA_NEUTRAL = 'delta_neutral'
NORMALIZED = 'normalized'
SPOT = 'spot'
FORWARD = 'forward'
class VolSmileReference(Enum):
SPOT = 'spot'
FORWARD = 'forward'
class EdrDataReference(Enum):
DELTA_CALL = 'delta_call'
DELTA_PUT = 'delta_put'
FORWARD = 'forward'
class ForeCastHorizon(Enum):
THREE_MONTH = '3m'
SIX_MONTH = '6m'
ONE_YEAR = '1y'
EOY1 = 'EOY1'
EOY2 = 'EOY2'
EOY3 = 'EOY3'
EOY4 = 'EOY4'
class BenchmarkType(Enum):
LIBOR = 'LIBOR'
EURIBOR = 'EURIBOR'
STIBOR = 'STIBOR'
OIS = 'OIS'
class RatesConversionType(Enum):
DEFAULT_BENCHMARK_RATE = auto()
INFLATION_BENCHMARK_RATE = auto()
CROSS_CURRENCY_BASIS = auto()
CURRENCY_TO_DEFAULT_RATE_BENCHMARK = {
'USD': 'USD-LIBOR-BBA',
'EUR': 'EUR-EURIBOR-Telerate',
'GBP': 'GBP-LIBOR-BBA',
'JPY': 'JPY-LIBOR-BBA'
}
CURRENCY_TO_INFLATION_RATE_BENCHMARK = {
'GBP': 'CPI-UKRPI',
'EUR': 'CPI-CPXTEMU'
}
CROSS_TO_CROSS_CURRENCY_BASIS = {
'JPYUSD': 'USD-3m/JPY-3m',
'USDJPY': 'USD-3m/JPY-3m',
'USDEUR': 'EUR-3m/USD-3m',
'EURUSD': 'EUR-3m/USD-3m',
'USDGBP': 'GBP-3m/USD-3m',
'GBPUSD': 'GBP-3m/USD-3m'
}
def cross_stored_direction_for_fx_vol(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None:
legit_usd_cross = str.startswith(bbid, "USD") and not str.endswith(bbid, ("EUR", "GBP", "NZD", "AUD"))
legit_eur_cross = str.startswith(bbid, "EUR")
legit_jpy_cross = str.endswith(bbid, "JPY") and not str.startswith(bbid, ("KRW", "IDR", "CLP", "COP"))
odd_cross = bbid in ("EURUSD", "GBPUSD", "NZDUSD", "AUDUSD", "JPYKRW", "JPYIDR", "JPYCLP", "JPYCOP")
if not legit_usd_cross and not legit_eur_cross and not legit_jpy_cross and not odd_cross:
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
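# Illustrative note (added): the reordering above is a plain string flip of the Bloomberg
# cross before the flipped asset is looked up; e.g. a hypothetical 'CADUSD' identifier
# would be stored as 'USDCAD'.
_demo_bbid = 'CADUSD'
assert _demo_bbid[3:] + _demo_bbid[:3] == 'USDCAD'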
def cross_to_usd_based_cross(asset_id: str) -> str:
result_id = asset_id
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
if asset.asset_class is AssetClass.FX:
bbid = asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is not None and not str.startswith(bbid, "USD"):
cross = bbid[3:] + bbid[:3]
cross_asset = SecurityMaster.get_asset(cross, AssetIdentifier.BLOOMBERG_ID)
result_id = cross_asset.get_marquee_id()
except TypeError:
result_id = asset_id
return result_id
def currency_to_default_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.DEFAULT_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def currency_to_inflation_benchmark_rate(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.INFLATION_BENCHMARK_RATE)
except TypeError:
result = asset_id
return result
def cross_to_basis(asset_id: str) -> str:
try:
asset = SecurityMaster.get_asset(asset_id, AssetIdentifier.MARQUEE_ID)
result = convert_asset_for_rates_data_set(asset, RatesConversionType.CROSS_CURRENCY_BASIS)
except TypeError:
result = asset_id
return result
def convert_asset_for_rates_data_set(from_asset: Asset, c_type: RatesConversionType) -> str:
try:
bbid = from_asset.get_identifier(AssetIdentifier.BLOOMBERG_ID)
if bbid is None:
return from_asset.get_marquee_id()
if c_type is RatesConversionType.DEFAULT_BENCHMARK_RATE:
to_asset = CURRENCY_TO_DEFAULT_RATE_BENCHMARK[bbid]
elif c_type is RatesConversionType.INFLATION_BENCHMARK_RATE:
to_asset = CURRENCY_TO_INFLATION_RATE_BENCHMARK[bbid]
else:
to_asset = CROSS_TO_CROSS_CURRENCY_BASIS[bbid]
return GsAssetApi.map_identifiers(GsIdType.mdapi, GsIdType.id, [to_asset])[to_asset]
except KeyError:
        logging.info(f'Unsupported currency or cross {bbid}')
        return from_asset.get_marquee_id()  # fall back to the asset's own Marquee id
def _get_custom_bd(exchange):
from pandas.tseries.offsets import CustomBusinessDay
calendar = GsCalendar.get(exchange).business_day_calendar()
return CustomBusinessDay(calendar=calendar)
@log_return(_logger, 'trying pricing dates')
def _range_from_pricing_date(exchange, pricing_date: Optional[GENERIC_DATE] = None):
if isinstance(pricing_date, datetime.date):
return pricing_date, pricing_date
today = pd.Timestamp.today().normalize()
if pricing_date is None:
t1 = today - _get_custom_bd(exchange)
return t1, t1
assert isinstance(pricing_date, str)
matcher = re.fullmatch('(\\d+)b', pricing_date)
if matcher:
start = end = today - _get_custom_bd(exchange) * int(matcher.group(1))
else:
end = today - datetime.timedelta(days=relative_days_add(pricing_date, True))
start = end - _get_custom_bd(exchange)
return start, end
def _to_offset(tenor: str) -> pd.DateOffset:
import re
matcher = re.fullmatch('(\\d+)([dwmy])', tenor)
if not matcher:
raise ValueError('invalid tenor ' + tenor)
ab = matcher.group(2)
if ab == 'd':
name = 'days'
elif ab == 'w':
name = 'weeks'
elif ab == 'm':
name = 'months'
else:
assert ab == 'y'
name = 'years'
kwarg = {name: int(matcher.group(1))}
return pd.DateOffset(**kwarg)
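# Quick illustrative check (added): tenor strings such as '3m' or '2y' become pandas
# DateOffsets keyed on the matching unit.
assert _to_offset('3m') == pd.DateOffset(months=3)
assert _to_offset('2y') == pd.DateOffset(years=2)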
def _market_data_timed(q):
start = time.perf_counter()
df = GsDataApi.get_market_data(q)
_logger.debug('market data query ran in %.3f ms', (time.perf_counter() - start) * 1000)
return df
@plot_measure((AssetClass.FX, AssetClass.Equity), None, [MeasureDependency(
id_provider=cross_stored_direction_for_fx_vol, query_type=QueryType.IMPLIED_VOLATILITY)])
def skew(asset: Asset, tenor: str, strike_reference: SkewReference, distance: Real, *, location: str = 'NYC',
source: str = None, real_time: bool = False) -> Series:
"""
Difference in implied volatility of equidistant out-of-the-money put and call options.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level (for equities)
:param distance: distance from at-the-money option
:param location: location at which a price fixing has been taken (for FX assets)
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: skew curve
"""
if real_time:
raise MqValueError('real-time skew not supported')
if strike_reference in (SkewReference.DELTA, None):
b = 50
elif strike_reference == SkewReference.NORMALIZED:
b = 0
else:
b = 100
kwargs = {}
if strike_reference in (SkewReference.DELTA, None):
# using delta call strikes so X DP is represented as (100 - X) DC
q_strikes = [100 - distance, distance, b]
else:
q_strikes = [b - distance, b + distance, b]
asset_id = asset.get_marquee_id()
if asset.asset_class == AssetClass.FX:
asset_id = cross_stored_direction_for_fx_vol(asset_id)
q_strikes = _to_fx_strikes(q_strikes)
kwargs['location'] = location
column = 'deltaStrike' # should use SkewReference.DELTA for FX
else:
assert asset.asset_class == AssetClass.Equity
if not strike_reference:
raise MqTypeError('strike reference required for equities')
if strike_reference != SkewReference.NORMALIZED:
q_strikes = [x / 100 for x in q_strikes]
kwargs['strikeReference'] = strike_reference.value
column = 'relativeStrike'
kwargs[column] = q_strikes
_logger.debug('where tenor=%s and %s', tenor, kwargs)
where = FieldFilterMap(tenor=tenor, **kwargs)
q = GsDataApi.build_market_data_query([asset_id], QueryType.IMPLIED_VOLATILITY, where=where, source=source)
_logger.debug('q %s', q)
df = _market_data_timed(q)
if df.empty:
return pd.Series()
curves = {k: v for k, v in df.groupby(column)}
if len(curves) < 3:
raise MqValueError('skew not available for given inputs')
series = [curves[qs]['impliedVolatility'] for qs in q_strikes]
return (series[0] - series[1]) / series[2]
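# Hedged sketch (added, numbers are made up): in the delta case the value returned above is
# (put-side vol - call-side vol) / at-the-money vol, so hypothetical 25-delta vols of 24%
# (put) and 18% (call) around a 20% ATM anchor give a skew of 0.3.
assert round((0.24 - 0.18) / 0.20, 10) == 0.3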
@plot_measure((AssetClass.Equity, AssetClass.Commod, AssetClass.FX,), None,
[MeasureDependency(id_provider=cross_stored_direction_for_fx_vol,
query_type=QueryType.IMPLIED_VOLATILITY)])
def implied_volatility(asset: Asset, tenor: str, strike_reference: VolReference, relative_strike: Real = None, *,
source: str = None, real_time: bool = False) -> Series:
"""
Volatility of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied volatility curve
"""
if relative_strike is None and strike_reference != VolReference.DELTA_NEUTRAL:
raise MqValueError('Relative strike must be provided if your strike reference is not delta_neutral')
if asset.asset_class == AssetClass.FX:
if strike_reference == VolReference.DELTA_NEUTRAL:
delta_strike = 'DN'
elif strike_reference == VolReference.DELTA_CALL:
delta_strike = f'{relative_strike}DC'
elif strike_reference == VolReference.DELTA_PUT:
delta_strike = f'{relative_strike}DP'
elif strike_reference == VolReference.FORWARD:
if relative_strike == 100:
delta_strike = 'ATMF'
else:
raise MqValueError('Relative strike must be 100 for Forward strike reference')
elif strike_reference == VolReference.SPOT:
if relative_strike == 100:
delta_strike = 'ATMS'
else:
raise MqValueError('Relative strike must be 100 for Spot strike reference')
else:
raise MqValueError('strikeReference: ' + strike_reference.value + ' not supported for FX')
asset_id = cross_stored_direction_for_fx_vol(asset.get_marquee_id())
_logger.debug('where tenor=%s, deltaStrike=%s, location=NYC', tenor, delta_strike)
q = GsDataApi.build_market_data_query(
[asset_id],
QueryType.IMPLIED_VOLATILITY,
where=FieldFilterMap(tenor=tenor, deltaStrike=delta_strike, location='NYC'),
source=source,
real_time=real_time
)
_logger.debug('q %s', q)
df = _market_data_timed(q)
else:
if strike_reference == VolReference.DELTA_NEUTRAL:
raise NotImplementedError('delta_neutral strike reference is not supported for equities.')
if strike_reference == VolReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike if strike_reference == VolReference.NORMALIZED else relative_strike / 100
ref_string = "delta" if strike_reference in (VolReference.DELTA_CALL,
VolReference.DELTA_PUT) else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, ref_string, relative_strike)
where = FieldFilterMap(tenor=tenor, strikeReference=ref_string, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([asset.get_marquee_id()], QueryType.IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedVolatility']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.IMPLIED_CORRELATION])
def implied_correlation(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Correlation of an asset implied by observations of market prices.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: implied correlation curve
"""
if real_time:
raise NotImplementedError('realtime implied_correlation not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.IMPLIED_CORRELATION, where=where, source=source,
real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
return Series() if df.empty else df['impliedCorrelation']
@plot_measure((AssetClass.Equity,), (AssetType.Index, AssetType.ETF,), [QueryType.AVERAGE_IMPLIED_VOLATILITY])
def average_implied_volatility(asset: Asset, tenor: str, strike_reference: EdrDataReference, relative_strike: Real, *,
source: str = None, real_time: bool = False) -> Series:
"""
Historic weighted average implied volatility for the underlying assets of an equity index.
:param asset: asset object loaded from security master
:param tenor: relative date representation of expiration date e.g. 1m
:param strike_reference: reference for strike level
:param relative_strike: strike relative to reference
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
:return: average implied volatility curve
"""
if real_time:
raise NotImplementedError('realtime average_implied_volatility not implemented')
if strike_reference == EdrDataReference.DELTA_PUT:
relative_strike = abs(100 - relative_strike)
relative_strike = relative_strike / 100
delta_types = (EdrDataReference.DELTA_CALL, EdrDataReference.DELTA_PUT)
strike_ref = "delta" if strike_reference in delta_types else strike_reference.value
_logger.debug('where tenor=%s, strikeReference=%s, relativeStrike=%s', tenor, strike_ref, relative_strike)
mqid = asset.get_marquee_id()
where = FieldFilterMap(tenor=tenor, strikeReference=strike_ref, relativeStrike=relative_strike)
q = GsDataApi.build_market_data_query([mqid], QueryType.AVERAGE_IMPLIED_VOLATILITY,
where=where, source=source, real_time=real_time)
_logger.debug('q %s', q)
df = _market_data_timed(q)
    return Series() if df.empty else df['averageImpliedVolatility']
# import modules ----------------------
import nba_py
import nba_py.game
import nba_py.player
import nba_py.team
import pandas as pd
import numpy as np
import datetime
import pytz
old_settings = np.seterr(all='print')
np.geterr()
print('modules imported')
# define functions ----------------------
def get_games(date):
"""
:param date: datetime.date, the match day
:return: df, all the games on the given day
"""
return nba_py.Scoreboard(month=date.month,
day=date.day,
year=date.year,
league_id='00',
offset=0).game_header()[['GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]
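# Editor's note: illustrative sketch, not part of the original script. Fetches the games
# for one example date (requires network access through nba_py); the date is arbitrary.
def _example_get_games():
    christmas_2017 = datetime.date(2017, 12, 25)
    return get_games(christmas_2017)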
def get_players(games, all_players):
"""
:param games: df, some games
:param all_players: df, all players list of this season
:return: df, all players of the given games
"""
home_team_player = all_players[all_players['TEAM_ID'].isin(games['HOME_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
home_team_player['Location'] = 'HOME'
away_team_player = all_players[all_players['TEAM_ID'].isin(games['VISITOR_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
away_team_player['Location'] = 'AWAY'
players = pd.concat([home_team_player, away_team_player])
game_team = pd.concat([games[['HOME_TEAM_ID', 'GAME_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID'}),
games[['VISITOR_TEAM_ID', 'GAME_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID'})])
players = pd.merge(players, game_team, on='TEAM_ID')
team_team = pd.concat(
[games[['HOME_TEAM_ID', 'VISITOR_TEAM_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID',
'VISITOR_TEAM_ID': 'Against_Team_ID'}),
games[['VISITOR_TEAM_ID', 'HOME_TEAM_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID',
'HOME_TEAM_ID': 'Against_Team_ID'})])
    players = pd.merge(players, team_team, on='TEAM_ID')
    return players
"""
Functions for data cleaning.
:author: <NAME>
"""
# Imports
import itertools
import numpy as np
import pandas as pd
import re
from sklearn.base import BaseEstimator, TransformerMixin
from typing import List, Optional, Union
from klib.describe import corr_mat
from klib.utils import (
_diff_report,
_drop_duplicates,
_missing_vals,
_validate_input_bool,
_validate_input_range,
)
__all__ = [
"clean_column_names",
"convert_datatypes",
"data_cleaning",
"drop_missing",
"mv_col_handling",
]
def optimize_ints(data: Union[pd.Series, pd.DataFrame]) -> pd.DataFrame:
data = pd.DataFrame(data).copy()
ints = data.select_dtypes(include=["int64"]).columns.tolist()
data[ints] = data[ints].apply(pd.to_numeric, downcast="integer")
return data
def optimize_floats(data: Union[pd.Series, pd.DataFrame]) -> pd.DataFrame:
data = pd.DataFrame(data).copy()
floats = data.select_dtypes(include=["float64"]).columns.tolist()
data[floats] = data[floats].apply(pd.to_numeric, downcast="float")
return data
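# Editor's note: illustrative sketch, not part of the original module. The column names
# and values are invented; the point is that int64/float64 columns are downcast to the
# smallest sufficient numeric dtypes by the two helpers above.
def _example_optimize_dtypes() -> pd.DataFrame:
    df = pd.DataFrame(
        {"small_ints": np.arange(1000, dtype="int64"),
         "small_floats": np.linspace(0.0, 1.0, 1000)}
    )
    return optimize_floats(optimize_ints(df))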
def clean_column_names(data: pd.DataFrame, hints: bool = True) -> pd.DataFrame:
""" Cleans the column names of the provided Pandas Dataframe and optionally \
provides hints on duplicate and long column names.
Parameters
----------
data : pd.DataFrame
Original Dataframe with columns to be cleaned
hints : bool, optional
        Print out hints on column name duplication and column name length, by default \
True
Returns
-------
pd.DataFrame
Pandas DataFrame with cleaned column names
"""
_validate_input_bool(hints, "hints")
# Handle CamelCase
for i, col in enumerate(data.columns):
matches = re.findall(re.compile("[a-z][A-Z]"), col)
column = col
for match in matches:
column = column.replace(match, match[0] + "_" + match[1])
data.rename(columns={data.columns[i]: column}, inplace=True)
data.columns = (
data.columns.str.replace("\n", "_")
.str.replace("(", "_")
.str.replace(")", "_")
.str.replace("'", "_")
.str.replace('"', "_")
.str.replace(".", "_")
.str.replace("-", "_")
.str.replace(r"[!?:;/]", "_", regex=True)
.str.replace("+", "_plus_")
.str.replace("*", "_times_")
.str.replace("<", "_smaller")
.str.replace(">", "_larger_")
.str.replace("=", "_equal_")
.str.replace("ä", "ae")
.str.replace("ö", "oe")
.str.replace("ü", "ue")
.str.replace("ß", "ss")
.str.replace("%", "_percent_")
.str.replace("$", "_dollar_")
.str.replace("€", "_euro_")
.str.replace("@", "_at_")
.str.replace("#", "_hash_")
.str.replace("&", "_and_")
.str.replace(r"\s+", "_", regex=True)
.str.replace(r"_+", "_", regex=True)
.str.strip("_")
.str.lower()
)
dupl_idx = [i for i, x in enumerate(data.columns.duplicated()) if x]
if len(dupl_idx) > 0:
dupl_before = data.columns[dupl_idx].tolist()
data.columns = [
col if col not in data.columns[:i] else col + "_" + str(i)
for i, col in enumerate(data.columns)
]
if hints:
print(
f"Duplicate column names detected! Columns with index {dupl_idx} and "
f"names {dupl_before}) have been renamed to "
f"{data.columns[dupl_idx].tolist()}."
)
long_col_names = [x for x in data.columns if len(x) > 25]
if len(long_col_names) > 0 and hints:
print(
"Long column names detected (>25 characters). Consider renaming the "
f"following columns {long_col_names}."
)
return data
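# Editor's note: illustrative sketch, not part of the original module. The messy headers
# below are invented; after cleaning they come out roughly as
# ['first_name', 'income_dollar', 'sales_channel', 'sales_channel_3'].
def _example_clean_column_names() -> pd.DataFrame:
    messy = pd.DataFrame(columns=["First Name", "income($)", "SalesChannel", "Sales Channel"])
    return clean_column_names(messy)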
def convert_datatypes(
data: pd.DataFrame,
category: bool = True,
cat_threshold: float = 0.05,
cat_exclude: Optional[List[Union[str, int]]] = None,
) -> pd.DataFrame:
""" Converts columns to best possible dtypes using dtypes supporting pd.NA.
Temporarily not converting to integers due to an issue in pandas. This is expected \
to be fixed in pandas 1.1. See https://github.com/pandas-dev/pandas/issues/33803
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
category : bool, optional
Change dtypes of columns with dtype "object" to "category". Set threshold \
using cat_threshold or exclude columns using cat_exclude, by default True
cat_threshold : float, optional
Ratio of unique values below which categories are inferred and column dtype is \
changed to categorical, by default 0.05
cat_exclude : Optional[List[Union[str, int]]], optional
List of columns to exclude from categorical conversion, by default None
Returns
-------
pd.DataFrame
Pandas DataFrame with converted Datatypes
"""
# Validate Inputs
_validate_input_bool(category, "Category")
_validate_input_range(cat_threshold, "cat_threshold", 0, 1)
cat_exclude = [] if cat_exclude is None else cat_exclude.copy()
data = pd.DataFrame(data).copy()
for col in data.columns:
unique_vals_ratio = data[col].nunique(dropna=False) / data.shape[0]
if (
category
and unique_vals_ratio < cat_threshold
and col not in cat_exclude
and data[col].dtype == "object"
):
data[col] = data[col].astype("category")
# convert_ints = True if int(pd.__version__.replace(".", "")) >= 110 else False
# convert_integer does not work as expected until pandas 1.1.0 while
# convert_string is still experimental
data[col] = data[col].convert_dtypes(
infer_objects=True,
convert_string=True,
convert_integer=False,
convert_boolean=True,
)
data = optimize_ints(data)
data = optimize_floats(data)
return data
def drop_missing(
data: pd.DataFrame,
drop_threshold_cols: float = 1,
drop_threshold_rows: float = 1,
col_exclude: Optional[List[str]] = None,
) -> pd.DataFrame:
""" Drops completely empty columns and rows by default and optionally provides \
flexibility to loosen restrictions to drop additional non-empty columns and \
rows based on the fraction of NA-values.
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
drop_threshold_cols : float, optional
Drop columns with NA-ratio equal to or above the specified threshold, by \
default 1
drop_threshold_rows : float, optional
Drop rows with NA-ratio equal to or above the specified threshold, by default 1
col_exclude : Optional[List[str]], optional
Specify a list of columns to exclude from dropping. The excluded columns do \
not affect the drop thresholds, by default None
Returns
-------
pd.DataFrame
Pandas DataFrame without any empty columns or rows
Notes
-----
Columns are dropped first
"""
# Validate Inputs
_validate_input_range(drop_threshold_cols, "drop_threshold_cols", 0, 1)
_validate_input_range(drop_threshold_rows, "drop_threshold_rows", 0, 1)
col_exclude = [] if col_exclude is None else col_exclude.copy()
data_exclude = data[col_exclude]
data = pd.DataFrame(data).copy()
data_dropped = data.drop(columns=col_exclude, errors="ignore")
data_dropped = data_dropped.drop(
columns=data_dropped.loc[
:, _missing_vals(data)["mv_cols_ratio"] > drop_threshold_cols
].columns
).dropna(axis=1, how="all")
data = pd.concat([data_dropped, data_exclude], axis=1)
data_cleaned = data.drop(
index=data.loc[
_missing_vals(data)["mv_rows_ratio"] > drop_threshold_rows, :
].index
).dropna(axis=0, how="all")
return data_cleaned
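# Editor's note: illustrative sketch, not part of the original module. Drops columns and
# rows that are more than 60 percent missing while always keeping an (assumed) "id"
# column; the thresholds and the column name are arbitrary.
def _example_drop_missing(df: pd.DataFrame) -> pd.DataFrame:
    return drop_missing(
        df, drop_threshold_cols=0.6, drop_threshold_rows=0.6, col_exclude=["id"]
    )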
def data_cleaning(
data: pd.DataFrame,
drop_threshold_cols: float = 0.9,
drop_threshold_rows: float = 0.9,
drop_duplicates: bool = True,
convert_dtypes: bool = True,
col_exclude: Optional[List[str]] = None,
category: bool = True,
cat_threshold: float = 0.03,
cat_exclude: Optional[List[Union[str, int]]] = None,
clean_col_names: bool = True,
show: str = "changes",
) -> pd.DataFrame:
""" Perform initial data cleaning tasks on a dataset, such as dropping single \
valued and empty rows, empty columns as well as optimizing the datatypes.
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
drop_threshold_cols : float, optional
Drop columns with NA-ratio equal to or above the specified threshold, by \
default 0.9
drop_threshold_rows : float, optional
Drop rows with NA-ratio equal to or above the specified threshold, by \
default 0.9
drop_duplicates : bool, optional
        Drop duplicate rows, keeping the first occurrence. This step comes after the \
dropping of missing values, by default True
convert_dtypes : bool, optional
Convert dtypes using pd.convert_dtypes(), by default True
col_exclude : Optional[List[str]], optional
Specify a list of columns to exclude from dropping, by default None
category : bool, optional
Enable changing dtypes of "object" columns to "category". Set threshold using \
cat_threshold. Requires convert_dtypes=True, by default True
cat_threshold : float, optional
Ratio of unique values below which categories are inferred and column dtype is \
changed to categorical, by default 0.03
cat_exclude : Optional[List[str]], optional
List of columns to exclude from categorical conversion, by default None
    clean_col_names : bool, optional
Cleans the column names and provides hints on duplicate and long names, by \
default True
show : str, optional
{"all", "changes", None}, by default "changes"
Specify verbosity of the output:
* "all": Print information about the data before and after cleaning as \
well as information about changes and memory usage (deep). Please be \
aware, that this can slow down the function by quite a bit.
* "changes": Print out differences in the data before and after cleaning.
* None: No information about the data and the data cleaning is printed.
Returns
-------
pd.DataFrame
Cleaned Pandas DataFrame
See also
--------
convert_datatypes: Convert columns to best possible dtypes.
drop_missing : Flexibly drop columns and rows.
_memory_usage: Gives the total memory usage in megabytes.
_missing_vals: Metrics about missing values in the dataset.
Notes
-----
The category dtype is not grouped in the summary, unless it contains exactly the \
same categories.
"""
# Validate Inputs
_validate_input_range(drop_threshold_cols, "drop_threshold_cols", 0, 1)
_validate_input_range(drop_threshold_rows, "drop_threshold_rows", 0, 1)
_validate_input_bool(drop_duplicates, "drop_duplicates")
_validate_input_bool(convert_dtypes, "convert_datatypes")
_validate_input_bool(category, "category")
_validate_input_range(cat_threshold, "cat_threshold", 0, 1)
data = pd.DataFrame(data).copy()
data_cleaned = drop_missing(
data, drop_threshold_cols, drop_threshold_rows, col_exclude=col_exclude
)
if clean_col_names:
data_cleaned = clean_column_names(data_cleaned)
single_val_cols = data_cleaned.columns[
data_cleaned.nunique(dropna=False) == 1
].tolist()
data_cleaned = data_cleaned.drop(columns=single_val_cols)
dupl_rows = None
if drop_duplicates:
data_cleaned, dupl_rows = _drop_duplicates(data_cleaned)
if convert_dtypes:
data_cleaned = convert_datatypes(
data_cleaned,
category=category,
cat_threshold=cat_threshold,
cat_exclude=cat_exclude,
)
_diff_report(
data,
data_cleaned,
dupl_rows=dupl_rows,
single_val_cols=single_val_cols,
show=show,
)
return data_cleaned
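# Editor's note: illustrative sketch, not part of the original module. A typical call on
# a raw CSV; the file name is a placeholder and show="changes" prints a short summary of
# what was dropped or converted.
def _example_data_cleaning(path: str = "raw_data.csv") -> pd.DataFrame:
    raw = pd.read_csv(path)
    return data_cleaning(raw, drop_threshold_cols=0.85, drop_threshold_rows=0.85, show="changes")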
class DataCleaner(BaseEstimator, TransformerMixin):
""" Wrapper for data_cleaning(). Allows data_cleaning() to be put into a pipeline \
with similar functions (e.g. using MVColHandler() or SubsetPooler()).
    Parameters
    ----------
drop_threshold_cols: float, default 0.9
Drop columns with NA-ratio equal to or above the specified threshold.
drop_threshold_rows: float, default 0.9
Drop rows with NA-ratio equal to or above the specified threshold.
drop_duplicates: bool, default True
        Drop duplicate rows, keeping the first occurrence. This step comes after the \
dropping of missing values.
convert_dtypes: bool, default True
Convert dtypes using pd.convert_dtypes().
col_exclude: list, default None
Specify a list of columns to exclude from dropping.
category: bool, default True
Change dtypes of columns to "category". Set threshold using cat_threshold. \
Requires convert_dtypes=True
cat_threshold: float, default 0.03
Ratio of unique values below which categories are inferred and column dtype is \
changed to categorical.
cat_exclude: list, default None
List of columns to exclude from categorical conversion.
    clean_col_names: bool, optional
Cleans the column names and provides hints on duplicate and long names, by \
default True
show: str, optional
{"all", "changes", None}, by default "changes"
Specify verbosity of the output:
* "all": Print information about the data before and after cleaning as \
well as information about changes and memory usage (deep). Please be \
aware, that this can slow down the function by quite a bit.
* "changes": Print out differences in the data before and after cleaning.
* None: No information about the data and the data cleaning is printed.
Returns
-------
data_cleaned: Pandas DataFrame
"""
def __init__(
self,
drop_threshold_cols: float = 0.9,
drop_threshold_rows: float = 0.9,
drop_duplicates: bool = True,
convert_dtypes: bool = True,
col_exclude: Optional[List[str]] = None,
category: bool = True,
cat_threshold: float = 0.03,
cat_exclude: Optional[List[Union[str, int]]] = None,
clean_col_names: bool = True,
show: str = "changes",
):
self.drop_threshold_cols = drop_threshold_cols
self.drop_threshold_rows = drop_threshold_rows
self.drop_duplicates = drop_duplicates
self.convert_dtypes = convert_dtypes
self.col_exclude = col_exclude
self.category = category
self.cat_threshold = cat_threshold
self.cat_exclude = cat_exclude
self.clean_col_names = clean_col_names
self.show = show
def fit(self, data, target=None):
return self
def transform(self, data, target=None):
data_cleaned = data_cleaning(
data,
drop_threshold_cols=self.drop_threshold_cols,
drop_threshold_rows=self.drop_threshold_rows,
drop_duplicates=self.drop_duplicates,
convert_dtypes=self.convert_dtypes,
col_exclude=self.col_exclude,
category=self.category,
cat_threshold=self.cat_threshold,
cat_exclude=self.cat_exclude,
clean_col_names=self.clean_col_names,
show=self.show,
)
return data_cleaned
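# Editor's note: illustrative sketch, not part of the original module. The wrapper class
# is meant for scikit-learn style pipelines, e.g. chained with MVColHandler (defined
# further below); the step names and thresholds are arbitrary.
def _example_cleaning_pipeline(df: pd.DataFrame) -> pd.DataFrame:
    from sklearn.pipeline import Pipeline
    pipe = Pipeline([
        ("cleaner", DataCleaner(drop_threshold_cols=0.9, drop_threshold_rows=0.9)),
        ("mv_handling", MVColHandler(mv_threshold=0.1)),
    ])
    return pipe.fit_transform(df)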
def mv_col_handling(
data: pd.DataFrame,
target: Optional[Union[str, pd.Series, List]] = None,
mv_threshold: float = 0.1,
corr_thresh_features: float = 0.5,
corr_thresh_target: float = 0.3,
return_details: bool = False,
) -> pd.DataFrame:
""" Converts columns with a high ratio of missing values into binary features and \
eventually drops them based on their correlation with other features and the \
target variable. This function follows a three step process:
- 1) Identify features with a high ratio of missing values (above 'mv_threshold').
- 2) Identify high correlations of these features among themselves and with \
other features in the dataset (above 'corr_thresh_features').
- 3) Features with high ratio of missing values and high correlation among each \
other are dropped unless they correlate reasonably well with the target \
variable (above 'corr_thresh_target').
Note: If no target is provided, the process exits after step two and drops columns \
identified up to this point.
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
target : Optional[Union[str, pd.Series, List]], optional
Specify target for correlation. I.e. label column to generate only the \
correlations between each feature and the label, by default None
mv_threshold : float, optional
Value between 0 <= threshold <= 1. Features with a missing-value-ratio larger \
than mv_threshold are candidates for dropping and undergo further analysis, by \
default 0.1
corr_thresh_features : float, optional
Value between 0 <= threshold <= 1. Maximum correlation a previously identified \
features (with a high mv-ratio) is allowed to have with another feature. If \
this threshold is overstepped, the feature undergoes further analysis, by \
default 0.5
corr_thresh_target : float, optional
Value between 0 <= threshold <= 1. Minimum required correlation of a remaining \
feature (i.e. feature with a high mv-ratio and high correlation to another \
existing feature) with the target. If this threshold is not met the feature is \
ultimately dropped, by default 0.3
return_details : bool, optional
        Provides flexibility to return intermediary results, by default False
Returns
-------
pd.DataFrame
Updated Pandas DataFrame
optional:
cols_mv: Columns with missing values included in the analysis
drop_cols: List of dropped columns
"""
# Validate Inputs
_validate_input_range(mv_threshold, "mv_threshold", 0, 1)
_validate_input_range(corr_thresh_features, "corr_thresh_features", 0, 1)
_validate_input_range(corr_thresh_target, "corr_thresh_target", 0, 1)
data = pd.DataFrame(data).copy()
data_local = data.copy()
mv_ratios = _missing_vals(data_local)["mv_cols_ratio"]
cols_mv = mv_ratios[mv_ratios > mv_threshold].index.tolist()
data_local[cols_mv] = (
data_local[cols_mv].applymap(lambda x: 1 if not pd.isnull(x) else x).fillna(0)
)
high_corr_features = []
data_temp = data_local.copy()
for col in cols_mv:
corrmat = corr_mat(data_temp, colored=False)
if abs(corrmat[col]).nlargest(2)[1] > corr_thresh_features:
high_corr_features.append(col)
data_temp = data_temp.drop(columns=[col])
drop_cols = []
if target is None:
data = data.drop(columns=high_corr_features)
else:
corrs = corr_mat(data_local, target=target, colored=False).loc[
high_corr_features
]
drop_cols = corrs.loc[abs(corrs.iloc[:, 0]) < corr_thresh_target].index.tolist()
data = data.drop(columns=drop_cols)
if return_details:
return data, cols_mv, drop_cols
return data
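# Editor's note: illustrative sketch, not part of the original module. Uses the optional
# intermediary results to report which high-missing-value columns were analysed and
# which were dropped; `label` names an assumed target column in `df`.
def _example_mv_col_handling(df: pd.DataFrame, label: str) -> pd.DataFrame:
    cleaned, cols_mv, dropped = mv_col_handling(df, target=label, return_details=True)
    print(f"{len(cols_mv)} columns above the MV threshold, {len(dropped)} dropped: {dropped}")
    return cleaned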
class MVColHandler(BaseEstimator, TransformerMixin):
""" Wrapper for mv_col_handling(). Allows mv_col_handling() to be put into a \
pipeline with similar functions (e.g. using DataCleaner() or SubsetPooler()).
Parameters
----------
target: string, list, np.array or pd.Series, default None
Specify target for correlation. E.g. label column to generate only the \
correlations between each feature and the label.
mv_threshold: float, default 0.1
Value between 0 <= threshold <= 1. Features with a missing-value-ratio larger \
than mv_threshold are candidates for dropping and undergo further analysis.
corr_thresh_features: float, default 0.6
Value between 0 <= threshold <= 1. Maximum correlation a previously identified \
features with a high mv-ratio is allowed to have with another feature. If this \
threshold is overstepped, the feature undergoes further analysis.
corr_thresh_target: float, default 0.3
Value between 0 <= threshold <= 1. Minimum required correlation of a remaining \
feature (i.e. feature with a high mv-ratio and high correlation to another \
existing feature) with the target. If this threshold is not met the feature is \
ultimately dropped.
return_details: bool, default True
Provdies flexibility to return intermediary results.
Returns
-------
data: Updated Pandas DataFrame
"""
def __init__(
self,
target: Optional[Union[str, pd.Series, List]] = None,
mv_threshold: float = 0.1,
corr_thresh_features: float = 0.6,
corr_thresh_target: float = 0.3,
return_details: bool = True,
):
self.target = target
self.mv_threshold = mv_threshold
self.corr_thresh_features = corr_thresh_features
self.corr_thresh_target = corr_thresh_target
self.return_details = return_details
def fit(self, data, target=None):
return self
def transform(self, data, target=None):
data, cols_mv, dropped_cols = mv_col_handling(
data,
target=self.target,
mv_threshold=self.mv_threshold,
corr_thresh_features=self.corr_thresh_features,
corr_thresh_target=self.corr_thresh_target,
return_details=self.return_details,
)
print(f"\nFeatures with MV-ratio > {self.mv_threshold}: {len(cols_mv)}")
print("Features dropped:", len(dropped_cols), dropped_cols)
return data
def pool_duplicate_subsets(
data: pd.DataFrame,
col_dupl_thresh: float = 0.2,
subset_thresh: float = 0.2,
min_col_pool: int = 3,
exclude: Optional[List[str]] = None,
return_details=False,
) -> pd.DataFrame:
""" Checks for duplicates in subsets of columns and pools them. This can reduce \
        the number of columns in the data without losing much information. Suitable \
columns are combined to subsets and tested for duplicates. In case sufficient \
duplicates can be found, the respective columns are aggregated into a \
"pooled_var" column. Identical numbers in the "pooled_var" column indicate \
identical information in the respective rows.
Note: It is advised to exclude features that provide sufficient informational \
content by themselves as well as the target column by using the "exclude" \
setting.
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
col_dupl_thresh : float, optional
Columns with a ratio of duplicates higher than "col_dupl_thresh" are \
considered in the further analysis. Columns with a lower ratio are not \
considered for pooling, by default 0.2
subset_thresh : float, optional
The first subset with a duplicate threshold higher than "subset_thresh" is \
chosen and aggregated. If no subset reaches the threshold, the algorithm \
continues with continuously smaller subsets until "min_col_pool" is reached, \
by default 0.2
min_col_pool : int, optional
Minimum number of columns to pool. The algorithm attempts to combine as many \
columns as possible to suitable subsets and stops when "min_col_pool" is \
reached, by default 3
exclude : Optional[List[str]], optional
List of column names to be excluded from the analysis. These columns are \
passed through without modification, by default None
return_details : bool, optional
Provdies flexibility to return intermediary results, by default False
Returns
-------
pd.DataFrame
DataFrame with low cardinality columns pooled
optional:
subset_cols: List of columns used as subset
"""
# Input validation
_validate_input_range(col_dupl_thresh, "col_dupl_thresh", 0, 1)
_validate_input_range(subset_thresh, "subset_thresh", 0, 1)
_validate_input_range(min_col_pool, "min_col_pool", 0, data.shape[1])
excluded_cols = []
if exclude is not None:
excluded_cols = data[exclude]
data = data.drop(columns=exclude)
subset_cols = []
for i in range(data.shape[1] + 1 - min_col_pool):
check_list = [
col
for col in data.columns
if data.duplicated(subset=col).mean() > col_dupl_thresh
]
if len(check_list) > 0:
combinations = itertools.combinations(check_list, len(check_list) - i)
else:
continue
ratios = [
*map(lambda comb: data.duplicated(subset=list(comb)).mean(), combinations)
]
max_ratio = max(ratios)
max_idx = np.argmax(ratios)
if max_ratio > subset_thresh:
best_subset = itertools.islice(
itertools.combinations(check_list, len(check_list) - i),
max_idx,
max_idx + 1,
)
best_subset = data[list(list(best_subset)[0])]
subset_cols = best_subset.columns.tolist()
unique_subset = (
best_subset.drop_duplicates()
.reset_index()
.rename(columns={"index": "pooled_vars"})
)
data = data.merge(
unique_subset, how="left", on=best_subset.columns.tolist()
).drop(columns=best_subset.columns.tolist())
data.index = pd.RangeIndex(len(data))
break
    data = pd.concat([data, pd.DataFrame(excluded_cols)], axis=1)
    if return_details:
        return data, subset_cols
    return data
"""
Folium operations.
save_map,
create_base_map,
heatmap,
heatmap_with_time,
cluster,
faster_cluster,
plot_markers,
plot_trajectories_with_folium,
plot_trajectory_by_id_folium,
plot_trajectory_by_period,
plot_trajectory_by_day_week,
plot_trajectory_by_date,
plot_trajectory_by_hour,
plot_stops,
plot_bbox,
plot_points_folium,
plot_poi_folium,
plot_event_folium,
show_trajs_with_event,
show_traj_id_with_event,
plot_traj_timestamp_geo_json
"""
from typing import Any, Dict, List, Optional, Text, Tuple, Union
import folium
import numpy as np
import pandas as pd
from folium import Map, plugins
from folium.plugins import FastMarkerCluster, HeatMap, HeatMapWithTime, MarkerCluster
from pandas import DataFrame
from pymove.preprocessing import filters
from pymove.utils import distances
from pymove.utils.constants import (
COUNT,
DATE,
DATETIME,
DAY,
EVENT_ID,
EVENT_POINT,
HOUR,
LATITUDE,
LINE_COLOR,
LONGITUDE,
PERIOD,
POI_POINT,
SITUATION,
STOP,
TILES,
TRAJ_ID,
UID,
USER_POINT,
)
from pymove.utils.datetime import str_to_datetime
from pymove.utils.log import progress_bar
from pymove.utils.visual import add_map_legend, cmap_hex_color, get_cmap
def save_map(
move_data: DataFrame,
filename: Text,
tiles: Optional[Text] = TILES[0],
label_id: Optional[Text] = TRAJ_ID,
cmap: Optional[Text] = 'Set1',
return_map: Optional[bool] = False
) -> Optional[Map]:
"""
    Save a visualization of the trajectories on a map in a new file.
Parameters
----------
move_data : DataFrame
Input trajectory data
filename : Text
Represents the filename path
tiles : str, optional
        Represents the type of tile that will be used on the map, by default TILES[0]
label_id : str, optional
Represents column name of trajectory id, by default TRAJ_ID
cmap : str, optional
Color map to use, by default 'Set1'
return_map : bool, optional
Represents the Colormap, by default False
Returns
-------
Map
folium map or None
"""
map_ = folium.Map(tiles=tiles)
map_.fit_bounds(
[
[move_data[LATITUDE].min(), move_data[LONGITUDE].min()],
[move_data[LATITUDE].max(), move_data[LONGITUDE].max()],
]
)
ids = move_data[label_id].unique()
cmap_ = get_cmap(cmap)
num = cmap_.N
for id_ in ids:
id_index = np.where(ids == id_)[0][0]
move_df = move_data[move_data[label_id] == id_]
points_ = [
(point[0], point[1])
for point in move_df[[LATITUDE, LONGITUDE]].values
]
color_ = cmap_hex_color(cmap_, (id_index % num))
folium.PolyLine(points_, weight=3, color=color_).add_to(map_)
map_.save(filename)
if return_map:
return map_
def create_base_map(
move_data: DataFrame,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
tile: Optional[Text] = TILES[0],
default_zoom_start: Optional[float] = 12,
) -> Map:
"""
Generates a folium map.
Parameters
----------
move_data : DataFrame
Input trajectory data
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
default_zoom_start : float, optional
Represents the zoom which will be the center of the map, by default 12
Returns
-------
Map
a folium map
"""
if lat_origin is None and lon_origin is None:
lat_origin = move_data[LATITUDE].median()
lon_origin = move_data[LONGITUDE].median()
base_map = folium.Map(
location=[lat_origin, lon_origin],
control_scale=True,
zoom_start=default_zoom_start,
tiles=tile
)
return base_map
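# Editor's note: illustrative sketch, not part of the original module. Builds an empty
# base map centred on a trajectory frame; `move_df` is assumed to carry the LATITUDE and
# LONGITUDE columns used throughout this module.
def _example_create_base_map(move_df: DataFrame) -> Map:
    return create_base_map(move_df, tile=TILES[0], default_zoom_start=14)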
def heatmap(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
radius: Optional[float] = 8,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'heatmap.html',
) -> Map:
"""
Generate visualization of Heat Map using folium plugin.
Parameters
----------
move_data : DataFrame
Input trajectory data
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
zoom_start : float, optional
Initial zoom level for the map, by default 12
radius : float, optional
Radius of each “point” of the heatmap, by default 8
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False
filename : str, optional
Represents the file name of new file .html, by default 'heatmap.html'
Returns
-------
Map
folium Map
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if n_rows is None:
n_rows = move_data.shape[0]
move_data[COUNT] = 1
HeatMap(
data=move_data.iloc[:n_rows][[LATITUDE, LONGITUDE, COUNT]]
.groupby([LATITUDE, LONGITUDE])
.sum()
.reset_index()
.values.tolist(),
radius=radius
).add_to(base_map)
move_data.drop(columns=COUNT, inplace=True)
if save_as_html:
base_map.save(outfile=filename)
return base_map
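# Editor's note: illustrative sketch, not part of the original module. Renders a heat map
# of the first 10000 points of an assumed trajectory frame and writes it straight to an
# HTML file; the row limit, radius and file name are arbitrary.
def _example_heatmap(move_df: DataFrame) -> Map:
    return heatmap(move_df, n_rows=10000, radius=10, save_as_html=True, filename='heatmap.html')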
def heatmap_with_time(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
radius: Optional[float] = 8,
min_opacity: Optional[float] = 0.5,
max_opacity: Optional[float] = 0.8,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'heatmap_time.html',
) -> Map:
"""
Generate visualization of Heat Map using folium plugin.
Parameters
----------
move_data : DataFrame
Input trajectory data
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
zoom_start : float, optional
Initial zoom level for the map, by default 12
radius : float, optional
Radius of each “point” of the heatmap, by default 8
min_opacity: float, optional
Minimum heat opacity, by default 0.5.
max_opacity: float, optional
Maximum heat opacity, by default 0.8.
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False
filename : str, optional
Represents the file name of new file .html, by default 'heatmap_time.html'
Returns
-------
Map
folium Map
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if n_rows is None:
n_rows = move_data.shape[0]
move_data = move_data.iloc[:n_rows].copy()
move_data[COUNT] = 1
move_data[HOUR] = move_data[DATETIME].apply(lambda x: x.hour)
move_data_hour_list = []
for hour in move_data[HOUR].sort_values().unique():
move_data_hour_list.append(
move_data.loc[move_data.hour == hour, [LATITUDE, LONGITUDE, COUNT]]
.groupby([LATITUDE, LONGITUDE])
.sum()
.reset_index()
.values.tolist()
)
HeatMapWithTime(
move_data_hour_list,
radius=radius,
gradient={0.2: 'blue', 0.4: 'lime', 0.6: USER_POINT, 1: 'red'},
min_opacity=min_opacity,
max_opacity=max_opacity,
use_local_extrema=True
).add_to(base_map)
move_data.drop(columns=[COUNT, HOUR], inplace=True)
if save_as_html:
base_map.save(outfile=filename)
return base_map
def cluster(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'cluster.html',
) -> Map:
"""
    Generate visualization of a marker cluster of the data points using folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
zoom_start : float, optional
Initial zoom level for the map, by default 12
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False
filename : str, optional
Represents the file name of new file .html, by default 'cluster.html'
Returns
-------
Map
folium Map
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if n_rows is None:
n_rows = move_data.shape[0]
mc = MarkerCluster()
for row in move_data.iloc[:n_rows].iterrows():
pop = (
'<b>Latitude:</b> '
+ str(row[1][LATITUDE])
+ '\n<b>Longitude:</b> '
+ str(row[1][LONGITUDE])
+ '\n<b>Datetime:</b> '
+ str(row[1][DATETIME])
)
mc.add_child(
folium.Marker(
location=[row[1][LATITUDE], row[1][LONGITUDE]], popup=pop
)
)
base_map.add_child(mc)
if save_as_html:
base_map.save(outfile=filename)
return base_map
def faster_cluster(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'faster_cluster.html',
) -> Map:
"""
    Generate visualization of a fast marker cluster of the data points using folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
zoom_start : float, optional
Initial zoom level for the map, by default 12
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False
filename : str, optional
Represents the file name of new file .html, by default 'faster_cluster.html'
Returns
-------
Map
folium Map
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if n_rows is None:
n_rows = move_data.shape[0]
callback = """\
function (row) {
var marker;
marker = L.circle(new L.LatLng(row[0], row[1]), {color:'red'});
return marker;
};
"""
FastMarkerCluster(
move_data.iloc[:n_rows][[LATITUDE, LONGITUDE]].values.tolist(),
callback=callback,
).add_to(base_map)
if save_as_html:
base_map.save(outfile=filename)
return base_map
def plot_markers(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'markers.html',
) -> Map:
"""
    Generate visualization of the data points as markers, highlighting the start and end of the trajectory.
Parameters
----------
move_data : DataFrame
Input trajectory data
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None
lat_origin : float, optional
Represents the latitude which will be the center of the map, by default None
lon_origin : float, optional
Represents the longitude which will be the center of the map, by default None
zoom_start : float, optional
Initial zoom level for the map, by default 12
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False
filename : str, optional
Represents the file name of new file .html, by default 'markers.html'
Returns
-------
Map
folium Map
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if n_rows is None:
n_rows = move_data.shape[0]
for i, row in enumerate(move_data.iloc[:n_rows].iterrows()):
if i == 0:
se = '<b>START</b>\n'
color = 'green'
elif i == n_rows - 1:
            se = '<b>END</b>\n'
color = 'red'
else:
se = ''
color = 'blue'
pop = (
se
+ '<b>Latitude:</b> '
+ str(row[1][LATITUDE])
+ '\n<b>Longitude:</b> '
+ str(row[1][LONGITUDE])
+ '\n<b>Datetime:</b> '
+ str(row[1][DATETIME])
)
folium.Marker(
location=[row[1][LATITUDE], row[1][LONGITUDE]],
color=color,
clustered_marker=True,
popup=pop,
icon=folium.Icon(color=color)
).add_to(base_map)
if save_as_html:
base_map.save(outfile=filename)
return base_map
def _filter_and_generate_colors(
move_data: DataFrame,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
color: Optional[Text] = None,
color_by_id: Optional[Dict] = None
) -> Tuple[DataFrame, List[Tuple]]:
"""
Filters the dataframe and generate colors for folium map.
Parameters
----------
move_data : DataFrame
Input trajectory data.
id_: int, optional
The TRAJ_ID's to be plotted, by default None
n_rows : int, optional
Represents number of data rows that are will plot, by default None.
color: str, optional
The color of the trajectory, of each trajectory or a colormap, by default None
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color(str),
by default None.
Returns
-------
DataFrame
Filtered trajectories
list of tuples
list containing a combination of id and color
"""
if n_rows is None:
n_rows = move_data.shape[0]
if id_ is not None:
mv_df = move_data[move_data[TRAJ_ID] == id_].iloc[:n_rows][
[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
]
if not len(mv_df):
raise IndexError('No user with id %s in dataframe' % id_)
else:
mv_df = move_data.iloc[:n_rows][
[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
]
if id_ is not None:
if color is None:
color = 'black'
items = [(id_, color)]
else:
if color is None:
color = 'Set1'
ids = mv_df[TRAJ_ID].unique()
if isinstance(color, str):
try:
cmap_ = get_cmap(color)
num = cmap_.N
colors = [
cmap_hex_color(cmap_, (i % num))
for i, _ in enumerate(ids)
]
diff = (len(ids) // len(colors)) + 1
colors *= diff
except ValueError:
colors = [color]
else:
colors = color[:]
items = [*zip(ids, colors)]
if color_by_id is not None:
keys = color_by_id.keys()
for key in keys:
for count, item in enumerate(items):
if str(key) == str(item[0]):
items[count] = (item[0], color_by_id[key])
return mv_df, items
def _filter_generated_feature(
move_data: DataFrame, feature: Text, values: Any
) -> DataFrame:
"""
Filters the values from the dataframe.
Parameters
----------
move_data : DataFrame
Input trajectory data.
feature: str
Name of the feature
value: any
value of the feature
Returns
-------
dataframe
filtered dataframe
"""
if len(values) == 1:
mv_df = move_data[move_data[feature] == values[0]]
else:
mv_df = move_data[
(move_data[feature] >= values[0])
& (move_data[feature] <= values[1])
]
if not len(mv_df):
raise KeyError('No %s found in dataframe' % feature)
return mv_df
def _add_begin_end_markers_to_folium_map(
move_data: DataFrame,
base_map: Map,
color: Optional[Text] = None,
_id: Optional[int] = None
):
"""
    Adds markers to the beginning and end of a trajectory.
    Adds a green marker to the beginning of the trajectory
and a red marker to the end of the trajectory.
Parameters
----------
    move_data : DataFrame
Input trajectory data.
base_map : Map, optional
Represents the folium map. If not informed, a new map is generated.
color : str, optional
Color of the markers, by default None
id: int, optional
Id of the trajectory, by default None
"""
points = folium.map.FeatureGroup(
'The start and end points of trajectory {}'.format(_id or '')
)
folium.Marker(
location=[move_data.iloc[0][LATITUDE], move_data.iloc[0][LONGITUDE]],
color='green',
clustered_marker=True,
        popup='Start',
icon=plugins.BeautifyIcon(
icon='play', icon_shape='marker', background_color=color or 'green'
)
).add_to(points)
folium.Marker(
location=[move_data.iloc[-1][LATITUDE], move_data.iloc[-1][LONGITUDE]],
color='red',
clustered_marker=True,
        popup='End',
icon=plugins.BeautifyIcon(
icon='times-circle', icon_shape='marker', background_color=color or 'red'
)
).add_to(points)
base_map.add_child(points)
def _add_trajectories_to_folium_map(
move_data: DataFrame,
items: Tuple,
base_map: Map,
legend: Optional[bool] = True,
save_as_html: Optional[bool] = True,
filename: Optional[Text] = 'map.html',
):
"""
Adds a trajectory to a folium map with begin and end markers.
Parameters
----------
move_data : DataFrame
        Input trajectory data.
    items : Tuple
        List of (id, color) pairs, one per trajectory to draw.
base_map : Map
Represents the folium map. If not informed, a new map is generated.
legend: bool
Whether to add a legend to the map, by default True
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default True.
filename : str, optional
Represents the file name of new file .html, by default 'map.html'.
"""
for _id, color in items:
mv = move_data[move_data[TRAJ_ID] == _id]
_add_begin_end_markers_to_folium_map(mv, base_map, color, _id)
folium.PolyLine(
mv[[LATITUDE, LONGITUDE]], color=color, weight=2.5, opacity=1
).add_to(base_map)
if legend:
add_map_legend(base_map, 'Color by user ID', items)
folium.map.LayerControl().add_to(base_map)
if save_as_html:
base_map.save(outfile=filename)
def plot_trajectories_with_folium(
move_data: DataFrame,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
color_by_id: Optional[Dict] = None,
filename: Optional[Text] = 'plot_trajectories_with_folium.html',
) -> Map:
"""
Generate visualization of all trajectories with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data.
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color(str),
by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectory_with_folium.html'.
Returns
-------
Map
a folium map with visualization.
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
mv_df, items = _filter_and_generate_colors(
move_data, n_rows=n_rows, color=color, color_by_id=color_by_id
)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
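# Editor's note: illustrative sketch, not part of the original module. Draws every
# trajectory using a colormap, overrides the colour of two assumed ids, and saves the
# result to an HTML file.
def _example_plot_trajectories(move_df: DataFrame) -> Map:
    return plot_trajectories_with_folium(
        move_df,
        color='Dark2',
        color_by_id={1: 'red', 2: 'blue'},
        save_as_html=True,
        filename='all_trajectories.html',
    )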
def plot_trajectory_by_id_folium(
move_data: DataFrame,
id_: int,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
filename: Optional[Text] = 'plot_trajectories_with_folium.html',
) -> Map:
"""
    Generate visualization of a single trajectory, selected by id, with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
id_: int
Trajectory id to plot
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectory_by_id_with_folium.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
mv_df, items = _filter_and_generate_colors(move_data, id_, n_rows, color)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
def plot_trajectory_by_period(
move_data: DataFrame,
period: Text,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
color_by_id: Optional[Dict] = None,
filename: Optional[Text] = 'plot_trajectories_by_period.html',
) -> Map:
"""
    Generate visualization of trajectories in a given period of the day with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
period: str
Period of the day
id_: int
Trajectory id to plot, by default None
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color,
by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectories_by_period.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
KeyError
If period value is not found in dataframe
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if PERIOD not in move_data:
move_data.generate_time_of_day_features()
mv_df = _filter_generated_feature(move_data, PERIOD, [period])
mv_df, items = _filter_and_generate_colors(mv_df, id_, n_rows, color, color_by_id)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
def plot_trajectory_by_day_week(
move_data: DataFrame,
day_week: Text,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
color_by_id: Optional[Dict] = None,
filename: Optional[Text] = 'plot_trajectories_by_day_week.html',
) -> Map:
"""
    Generate visualization of trajectories on a given day of the week with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
day_week: str
Day of the week
id_: int
Trajectory id to plot, by default None
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        If True, saves the visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color,
by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectories_by_day_week.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
KeyError
If period value is not found in dataframe
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if DAY not in move_data:
move_data.generate_day_of_the_week_features()
mv_df = _filter_generated_feature(move_data, DAY, [day_week])
mv_df, items = _filter_and_generate_colors(mv_df, id_, n_rows, color, color_by_id)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
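# Editor's note: illustrative sketch, not part of the original module. Restricts the plot
# to Mondays for one (assumed) trajectory id; `move_df` is assumed to be a pymove
# MoveDataFrame so that generate_day_of_the_week_features() is available when needed.
def _example_plot_mondays(move_df: DataFrame) -> Map:
    return plot_trajectory_by_day_week(move_df, day_week='Monday', id_=1, color='purple')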
def plot_trajectory_by_date(
move_data: DataFrame,
start_date: Text,
end_date: Text,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
color_by_id: Optional[Dict] = None,
filename: Optional[Text] = 'plot_trajectories_by_date.html',
) -> Map:
"""
    Generate visualization of trajectories within a given date range with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
start_date : str
Represents start date of time period.
end_date : str
Represents end date of time period.
id_: int, optional
Trajectory id to plot, by default None
n_rows : int, optional
        Represents the number of data rows that will be plotted, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color,
by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectories_by_date.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
KeyError
If period value is not found in dataframe
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if isinstance(start_date, str):
start_date = str_to_datetime(start_date).date()
if isinstance(end_date, str):
end_date = str_to_datetime(end_date).date()
if DATE not in move_data:
move_data.generate_date_features()
mv_df = _filter_generated_feature(move_data, DATE, [start_date, end_date])
mv_df, items = _filter_and_generate_colors(mv_df, id_, n_rows, color, color_by_id)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
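# Usage sketch (illustrative only; `move_df` and the dates are hypothetical, and the
# dataframe is assumed to carry the LATITUDE, LONGITUDE, DATETIME and TRAJ_ID columns):
#
#     m = plot_trajectory_by_date(move_df, start_date='2023-01-01',
#                                 end_date='2023-01-07', save_as_html=True,
#                                 filename='first_week.html')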
def plot_trajectory_by_hour(
move_data: DataFrame,
start_hour: Text,
end_hour: Text,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
color_by_id: Optional[Dict] = None,
filename: Optional[Text] = 'plot_trajectories_by_hour.html',
) -> Map:
"""
Generate visualization of all trajectories with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
start_hour : str
Represents start hour of time period.
end_hour : str
Represents end hour of time period.
id_: int, optional
Trajectory id to plot, by default None
n_rows : int, optional
        Number of rows of data to plot, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
color_by_id: dict, optional
A dictionary where the key is the trajectory id and value is a color,
by default None.
filename : str, optional
Represents the file name of new file .html,
by default 'plot_trajectories_by_hour.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
KeyError
If period value is not found in dataframe
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if HOUR not in move_data:
move_data.generate_hour_features()
mv_df = _filter_generated_feature(move_data, HOUR, [start_hour, end_hour])
mv_df, items = _filter_and_generate_colors(mv_df, id_, n_rows, color, color_by_id)
_add_trajectories_to_folium_map(
mv_df, items, base_map, legend, save_as_html, filename
)
return base_map
def plot_stops(
move_data: DataFrame,
radius: Optional[float] = 0,
weight: Optional[float] = 3,
id_: Optional[int] = None,
n_rows: Optional[int] = None,
lat_origin: Optional[float] = None,
lon_origin: Optional[float] = None,
zoom_start: Optional[float] = 12,
legend: Optional[bool] = True,
base_map: Optional[Map] = None,
tile: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
color: Optional[Union[Text, List[Text]]] = None,
filename: Optional[Text] = 'plot_stops.html',
) -> Map:
"""
    Generate visualization of the stops of trajectories with folium.
Parameters
----------
move_data : DataFrame
Input trajectory data
radius : float, optional
The radius value is used to determine if a segment is a stop.
If the value of the point in target_label is greater than
radius, the segment is a stop, by default 0
weight: float, optional
Stroke width in pixels, by default 3
id_: int, optional
Trajectory id to plot, by default None
n_rows : int, optional
        Number of rows of data to plot, by default None.
lat_origin : float, optional
Represents the latitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
lon_origin : float, optional
Represents the longitude which will be the center of the map.
If not entered, the first data from the dataset is used, by default None.
zoom_start : int, optional
Initial zoom level for the map, by default 12.
legend: boolean
Whether to add a legend to the map, by default True
base_map : folium.folium.Map, optional
Represents the folium map. If not informed, a new map is generated
using the function create_base_map(), with the lat_origin, lon_origin
and zoom_start, by default None.
tile : str, optional
Represents the map tiles, by default TILES[0]
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
color : str, list, optional
Represents line colors of visualization.
Can be a single color name, a list of colors or a colormap name, by default None.
filename : str, optional
Represents the file name of new file .html, by default 'plot_stops.html'.
Returns
-------
Map
a folium map with visualization
Raises
------
KeyError
If period value is not found in dataframe
IndexError
If there is no user with the id passed
"""
if base_map is None:
base_map = create_base_map(
move_data,
lat_origin,
lon_origin,
tile=tile,
default_zoom_start=zoom_start,
)
if SITUATION not in move_data:
move_data.generate_move_and_stop_by_radius(radius=radius)
mv_df = _filter_generated_feature(move_data, SITUATION, STOP)
mv_df, items = _filter_and_generate_colors(mv_df, id_, n_rows, color)
for _id, color in items:
for stop in mv_df[mv_df[TRAJ_ID] == _id].iterrows():
base_map.add_child(
folium.Circle(
(stop[1][LATITUDE], stop[1][LONGITUDE]),
color=color,
weight=weight,
radius=40,
opacity=0.5,
popup=stop[1][DATETIME],
fill_color=color,
fill_opacity=0.5,
)
)
if legend:
add_map_legend(base_map, 'Color by user ID', items)
if save_as_html:
base_map.save(outfile=filename)
return base_map
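# Usage sketch (illustrative only; `move_df` is a hypothetical MoveDataFrame):
#
#     stops_map = plot_stops(move_df, radius=20, color='purple',
#                            save_as_html=True, filename='stops.html')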
def plot_bbox(
bbox_tuple: Tuple[float, float, float, float],
base_map: Optional[Map] = None,
tiles: Optional[Text] = TILES[0],
color: Optional[Text] = 'red',
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'bbox.html'
) -> Map:
"""
Plots a bbox using Folium.
Parameters
----------
bbox_tuple : tuple.
Represents a bound box, that is a tuple of 4 values with the
min and max limits of latitude e longitude.
base_map: Folium map, optional
A folium map to plot the trajectories. If None a map will be created,
by default None.
tiles : str, optional
by default TILES[0]
color : str, optional
Represents color of lines on map, by default 'red'.
    save_as_html : bool, optional
        Whether to save the bbox folium map to a new .html file, by default False.
    filename : str, optional
        Represents the file name of the new .html file, by default 'bbox.html'.
Returns
-------
Map
folium map with bounding box
"""
if base_map is None:
base_map = folium.Map(tiles=tiles)
base_map.fit_bounds(
[[bbox_tuple[0], bbox_tuple[1]], [bbox_tuple[2], bbox_tuple[3]]]
)
points_ = [
(bbox_tuple[0], bbox_tuple[1]),
(bbox_tuple[0], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[1]),
(bbox_tuple[0], bbox_tuple[1]),
]
polygon = folium.PolyLine(points_, weight=3, color=color)
polygon.add_to(base_map)
if save_as_html:
base_map.save(filename)
return base_map
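# Usage sketch: the tuple is (min_lat, min_lon, max_lat, max_lon); the coordinates
# below are arbitrary placeholders.
#
#     bbox_map = plot_bbox((38.70, -90.37, 38.79, -90.23), color='blue',
#                          save_as_html=True, filename='bbox.html')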
def _format_tags(line, slice_):
"""
Create or format tags.
Parameters
----------
    line: Row (dict-like) of the dataframe whose values will be formatted.
    slice_: Iterable of column names to include in the tag.
Returns
-------
str: formatted html tag
"""
map_formated_tags = map(lambda tag: '{}: {}'.format(tag, line[tag]), slice_)
return '<br/>'.join(map_formated_tags)
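# For example, _format_tags({'id': 1, 'speed': 3.2}, ['id', 'speed']) returns
# 'id: 1<br/>speed: 3.2', i.e. one "column: value" pair per popup line.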
def _circle_maker(
iter_tuple,
user_lat,
user_lon,
slice_tags,
user_point,
radius,
map_
):
"""
    Draw a circle for one data row and add it to the folium map.
Parameters
----------
iter_tuple: DataFrame iter_tuple.
user_lat: str.
Latitude column name.
user_lon: str.
Longitude column name.
    slice_tags: list.
        Column names used to build the popup text.
user_point: str.
Point color.
radius: float.
radius size.
map_: Folium map.
"""
_, line = iter_tuple
x = line[user_lat]
y = line[user_lon]
tags_formated = _format_tags(line, slice_tags)
folium.Circle(
radius=radius,
location=[x, y],
popup=tags_formated,
color=user_point,
fill=False
).add_to(map_)
def plot_points_folium(
move_data: DataFrame,
user_lat: Optional[Text] = LATITUDE,
user_lon: Optional[Text] = LONGITUDE,
user_point: Optional[Text] = USER_POINT,
radius: Optional[float] = 2,
base_map: Optional[Map] = None,
slice_tags: Optional[List] = None,
tiles: Optional[Text] = TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'points.html'
) -> Map:
"""
    Generates a folium map with the data points plotted as circles.
Parameters
----------
move_data: Dataframe
Trajectory data.
user_lat: str, optional
Latitude column name, by default LATITUDE.
user_lon: str, optional
Longitude column name, by default LONGITUDE.
user_point: str, optional
The point color, by default USER_POINT.
radius: float, optional
radius size, by default 2.
base_map: Folium map, optional
A folium map to plot the trajectories. If None a map will be created,
by default None.
    slice_tags: list, optional
        Column names shown in each point popup, by default None (all columns).
tiles: str, optional, by default TILES[0]
The map type.
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
filename : str, optional
Represents the file name of new file .html, by default 'points.html'.
Returns
-------
Map
A folium map
"""
if not slice_tags:
slice_tags = move_data.columns
    # If no base map is given, create one centred on the mean latitude and longitude
if not base_map:
initial_lat = move_data[user_lat].mean()
initial_lon = move_data[user_lon].mean()
base_map = create_base_map(
move_data=move_data,
lat_origin=initial_lat,
lon_origin=initial_lon,
tile=tiles
)
list(
map(
lambda x: _circle_maker(
x,
user_lat,
user_lon,
slice_tags,
user_point,
radius,
base_map
),
move_data.iterrows()
)
)
if save_as_html:
base_map.save(outfile=filename)
return base_map
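# Usage sketch (illustrative only; `move_df` is a hypothetical MoveDataFrame): each row
# is drawn as a small circle whose popup lists the columns passed in `slice_tags`.
#
#     pts_map = plot_points_folium(move_df, radius=3,
#                                  slice_tags=[LATITUDE, LONGITUDE, DATETIME])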
def plot_poi_folium(
move_data,
poi_lat=LATITUDE,
poi_lon=LONGITUDE,
poi_point=POI_POINT,
radius=2,
base_map=None,
slice_tags=None,
tiles=TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'pois.html'
) -> Map:
"""
Receives a MoveDataFrame and returns a folium map with poi points.
Parameters
----------
move_data: DataFrame
Trajectory input data
poi_lat: str, optional
Latitude column name, by default LATITUDE.
poi_lon: str, optional
Longitude column name, by default LONGITUDE.
poi_point: str, optional
Poi point color, by default POI_POINT.
radius: float, optional
radius size, by default 2.
base_map: Folium map, optional
        A folium map to plot on. If None, a new map will be created,
by default None.
    slice_tags: list, optional
        Column names shown in each point popup, by default None.
tiles: str, optional, by default TILES[0]
The map type.
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
filename : str, optional
Represents the file name of new file .html, by default 'pois.html'.
Returns
-------
folium.folium.Map.
Represents a folium map with visualization.
"""
return plot_points_folium(
move_data,
user_lat=poi_lat,
user_lon=poi_lon,
user_point=poi_point,
radius=radius,
base_map=base_map,
slice_tags=slice_tags,
tiles=tiles,
save_as_html=save_as_html,
filename=filename
)
def plot_event_folium(
move_data,
event_lat=LATITUDE,
event_lon=LONGITUDE,
event_point=EVENT_POINT,
radius=2,
base_map=None,
slice_tags=None,
tiles=TILES[0],
save_as_html: Optional[bool] = False,
filename: Optional[Text] = 'events.html'
) -> Map:
"""
Receives a MoveDataFrame and returns a folium map with events.
Parameters
----------
move_data: DataFrame
Trajectory input data
event_lat: str, optional
Latitude column name, by default LATITUDE.
event_lon: str, optional
Longitude column name, by default LONGITUDE.
event_point: str, optional
        Event color, by default EVENT_POINT.
radius: float, optional
radius size, by default 2.
base_map: Folium map, optional
        A folium map to plot on. If None, a new map will be created,
by default None.
tiles: str, optional, by default TILES[0]
save_as_html : bool, optional
        Whether to save this visualization to a new .html file, by default False.
filename : str, optional
Represents the file name of new file .html, by default 'events.html'.
Returns
-------
    Map
        A folium map with the event points plotted.
"""
return plot_points_folium(
move_data,
user_lat=event_lat,
user_lon=event_lon,
user_point=event_point,
radius=radius,
base_map=base_map,
slice_tags=slice_tags,
tiles=tiles,
save_as_html=save_as_html,
filename=filename
)
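# Note: plot_poi_folium and plot_event_folium are thin wrappers around
# plot_points_folium that only change the default point colour (POI_POINT and
# EVENT_POINT respectively), so the same keyword arguments apply. For example
# (illustrative only; `events_df` is a hypothetical DataFrame with lat/lon columns):
#
#     ev_map = plot_event_folium(events_df, radius=5, save_as_html=True,
#                                filename='events.html')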
def show_trajs_with_event(
move_data: DataFrame,
window_time_subject: float,
df_event: DataFrame,
window_time_event: float,
radius: float,
event_lat: Optional[Text] = LATITUDE,
event_lon: Optional[Text] = LONGITUDE,
event_datetime: Optional[Text] = DATETIME,
user_lat: Optional[Text] = LATITUDE,
user_lon: Optional[Text] = LONGITUDE,
user_datetime: Optional[Text] = DATETIME,
event_id: Optional[Text] = EVENT_ID,
event_point: Optional[Text] = EVENT_POINT,
user_id: Optional[Text] = UID,
user_point: Optional[Text] = USER_POINT,
line_color: Optional[Text] = LINE_COLOR,
slice_event_show: Optional[int] = None,
slice_subject_show: Optional[int] = None,
) -> List[Map]:
"""
    Plot the trajectories of subjects around each event, including their points (lat, lon) and tags.
Parameters
----------
move_data: DataFrame.
Trajectory input data.
window_time_subject: float.
The subject time window.
window_time_event: float.
The event time window.
radius: float.
The radius to use.
event_lat: str, optional
Event latitude column name, by default LATITUDE.
event_lon: str, optional
Event longitude column name, by default LONGITUDE.
event_datetime: str, optional
Event datetime column name, by default DATETIME.
user_lat: str, optional
User latitude column name, by default LATITUDE.
user_lon: str, optional
User longitude column name, by default LONGITUDE.
user_datetime: str, optional
User datetime column name, by default DATETIME.
    event_id: str, optional
        Event id column name, by default EVENT_ID.
    event_point: str, optional
        Event color, by default EVENT_POINT.
    user_id: str, optional
        User id column name, by default UID.
    user_point: str, optional
        User point color, by default USER_POINT.
    line_color: str, optional
        Line color, by default LINE_COLOR.
slice_event_show: int, optional
by default None.
slice_subject_show: int, optional
by default None.
Returns
-------
list of Map
A list of folium maps.
"""
# building structure for deltas
delta_event = pd.to_timedelta(window_time_event, unit='s')
delta_user = pd.to_timedelta(window_time_subject, unit='s')
# length of df_user
len_df_user = move_data.shape[0]
# building structure for lat and lon array
lat_arr = np.zeros(len_df_user)
lon_arr = np.zeros(len_df_user)
# folium map list
folium_maps = []
# for each event in df_event
for _, line in df_event.iterrows():
e_lat = line[event_lat]
e_lon = line[event_lon]
e_datetime = line[event_datetime]
e_id = line[event_id]
# building time window for event search
start_time = pd.to_datetime(e_datetime - delta_event)
        end_time = pd.to_datetime(e_datetime + delta_event)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
from sklearn.model_selection import TimeSeriesSplit
from keras.layers import Dropout
from keras.layers import Dense, LSTM
from keras.models import Sequential
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from datetime import timedelta
from datetime import datetime
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
from pandas_datareader import data as pdr
from sklearn.metrics import r2_score, mean_squared_error
# import pandas
# load the data
#df = pd.read_csv('finance_dataset.csv')
# display the dataframe
#df
code = '6976' # '6976'#6758
# Fetch about one year of data, from 2021 up to today. First, fix the date range.
# (2021, 1, 1) # training data (historical data up to now)
#start_train = datetime.date(2022, 1, 1) # training data (historical data up to now)
start_train=datetime.date.today() + relativedelta(days=-700)
#dowstart_train = datetime.date(2022, 1, 5)#start_train + relativedelta(days=+3)
# data can be fetched up to yesterday (today - 1 day), since today's prices are still moving
end_train = datetime.date.today() + relativedelta(days=-1)
data = pdr.get_data_yahoo(f'{code}.T', start_train, end_train) # load the training data.
Dow_df = pdr.get_data_yahoo('^DJI', start_train, end_train) # load the test data csv file.
Nikkei_df = pdr.get_data_yahoo('^N225', start_train, end_train) # load the test data csv file.
# data preprocessing
# there is missing data, so drop the NaN values
#df_NikkeiAll_drop = df_NikkeiAll.dropna()
#df_NikkeiAll_drop.head() # show the first 5 rows as a table
print(data.head())
'''
The dataframe has a sequential index from 0 to 13966 and columns for
date ('Date'), high ('High'), low ('Low'), open ('Open') and close ('Close').
The dates ('Date') run from January 5, 1965 to October 21, 2021.
As explained in more detail later, building the prediction model requires adding a target variable and aggregating the data by week.
To that end we add day-of-week information, the week number counted from the first week, and the target variable 'Up',
which indicates whether Friday's opening price rises above Thursday's closing price ('1' if it rises, '0' if it stays the same or falls).
Next, we use the info method to check for missing values and the data type of each column.
'''
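# Simplified, illustrative sketch of the weekly features and the 'Up' target described
# above (not from the original script; it assumes 'Date' has already been converted to
# datetime as done further below, and column names follow the yfinance layout):
#
# data['weekday'] = data['Date'].dt.dayofweek                        # day-of-week info
# data['week'] = (data['Date'] - data['Date'].min()).dt.days // 7    # week number from the first week
# data['Up'] = (data['Open'].shift(-1) > data['Close']).astype(int)  # 1 if the next open is above today's close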
# check the details of each column
data.info()
'''
None of the columns contain missing values.
The date column ('Date') has dtype 'object'. For time-series data like this the 'datetime64' dtype is more convenient,
so we convert it with pandas' 'to_datetime' method.
'''
# reset the date index
data.reset_index(drop=False,inplace=True)
Dow_df.reset_index(drop=False,inplace=True)
Nikkei_df.reset_index(drop=False, inplace=True)
# convert the Date column's dtype to 'datetime'
data['Date'] = pd.to_datetime(data['Date'])
Dow_df['Date'] = pd.to_datetime(Dow_df['Date'])
Nikkei_df['Date'] = pd.to_datetime(Nikkei_df['Date'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 09:11:58 2020
@author: ets
"""
import datetime as dt
import logging
import re
import warnings
from pathlib import Path
from typing import List, Tuple
# import climpred
import numpy as np
import pandas as pd
import xarray as xr
from climpred import HindcastEnsemble
from . import gis_import_error_message
try:
import rioxarray
from clisops.core import subset
except (ImportError, ModuleNotFoundError) as e:
msg = gis_import_error_message.format(Path(__file__).stem)
raise ImportError(msg) from e
from ravenpy.models import get_model
LOGGER = logging.getLogger("PYWPS")
# TODO: Complete docstrings
# This function gets model states after running the model (i.e. states at the end of the run).
def get_raven_states(model, workdir=None, **kwds):
"""Get the RAVEN states file (.rvc file) after a model run.
Parameters
----------
model : {'HMETS', 'GR4JCN', 'MOHYSE', 'HBVEC'}
Model name.
kwds : {}
Model configuration parameters, including the forcing files (ts).
Returns
-------
rvc : {}
Raven model forcing file
"""
# Run the model and get the rvc file for future hotstart.
m = get_model(model)(workdir=workdir)
m(overwrite=True, **kwds)
rvc = m.outputs["solution"]
return rvc
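# Illustrative call (a sketch; the model name, file name and dates are placeholders,
# and the exact keywords depend on the chosen Raven emulator configuration):
#
#     rvc = get_raven_states("GR4JCN", ts="forcing.nc",
#                            start_date=dt.datetime(2000, 1, 1),
#                            end_date=dt.datetime(2001, 1, 1))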
# Do the actual forecasting step
def perform_forecasting_step(rvc, model, workdir=None, **kwds):
"""
Function that might be useful eventually to do a forecast from a model setup.
"""
# kwds includes 'ts', the forecast timeseries data
# Setup the model
m = get_model(model)(workdir=workdir)
# Force the initial conditions
m.resume(rvc)
# Set the parameters, start dates, etc. required to run the model and run
m(overwrite=True, **kwds)
return m.q_sim
def perform_climatology_esp(
model_name, forecast_date, forecast_duration, workdir=None, **kwds
):
"""
This function takes the model setup and name as well as forecast data and duration and returns
an ESP forecast netcdf. The data comes from the climatology data and thus there is a mechanism
to get the correct data from the time series and exclude the current year.
Parameters
----------
model_name : {'HMETS', 'MOHYSE', 'GR4JCN', 'HBVEC'}
Model name to instantiate Raven model.
forecast_date : datetime.datetime
Date of the forecast issue.
forecast_duration : int
Number of days of forecast, forward looking.
kwds : dict
Raven model configuration parameters.
Returns
-------
qsims
Array of streamflow values from the ESP method along with list of member years
"""
# Get the timeseries
tsnc = xr.open_dataset(kwds["ts"])
# Prepare model instance
m = get_model(model_name)(workdir=workdir)
# Now find the periods of time for warm-up and forecast and add to the model keywords as the defaults are failing
# (nanoseconds datetimes do not like the year 0001...)
start_date = pd.to_datetime(tsnc["time"][0].values)
start_date = start_date.to_pydatetime()
kwds["start_date"] = start_date
# Forecasting from Feb 29th is not ideal, we will replace with Feb 28th.
# Should not change much in a climatological forecast.
if forecast_date.month == 2 and forecast_date.day == 29:
forecast_date.replace(day=28)
# Check to make sure forecast date is not in the first year as we need model warm-up.
# We cannot use timedelta because if the dataset happens to start on a leap
# year, then the timedelta=365 days will not be robust. (and we cannot use timedelta(years=1)...)
dateLimit = start_date.replace(year=start_date.year + 1)
if dateLimit > forecast_date:
msg = (
"Forecast date is within the warm-up period. Select another forecast date."
)
warnings.warn(msg)
# initialize the array of forecast variables
qsims = []
# list of unique years in the dataset:
avail_years = list(np.unique(tsnc["time.year"].data))
# Take a copy of the forecast initial date before overwriting in the forecast step.
forecast_date_main = forecast_date
# Remove the year that we are forecasting. Or else it's cheating!
avail_years.remove(forecast_date.year)
# Update the forecast end-date, which will be the day prior to the forecast date.
# So forecasts warm-up will be from day 1 in the dataset to the forecast date.
kwds["end_date"] = forecast_date - dt.timedelta(days=1)
# Get RVC file if it exists, else compute it.
if len(kwds['rvc']) > 0:
rvc=kwds.pop('rvc')
else:
# Run model to get rvc file after warm-up using base meteo
rvc = get_raven_states(model_name, workdir=workdir, **kwds)
# We need to check which years are long enough (ex: wrapping years, 365-day forecast starting in
# September 2015 will need data up to August 2016 at least)
for years in avail_years:
if forecast_date.replace(year=years) + dt.timedelta(
days=forecast_duration - 1
        ) > pd.to_datetime(tsnc["time"][-1].values):
            # This year cannot cover the full forecast duration, so drop it.
            avail_years.remove(years)
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
        with tm.assert_raises_regex(ValueError, msg):
            self.read_csv(StringIO(self.data1), chunksize='foo')
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def adata_preprocess(adata, n_top_genes=1000, log=True):
# this is a lot like the steps for scvelo.pp.filter_and_normalize() which also allows selection of top genes (see Pancreas)
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count#1
# print(adata)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell #same as normalize_total()
adata, key_n_counts='n_counts_all'
)
# select highly-variable genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
total = adata.X
total = total.sum(axis=0).transpose()
total = pd.DataFrame(total.transpose())
print('total')
print(total.shape)
#total = total.sum(axis=0).transpose()
total.columns = [i for i in adata.var_names]
print(total)
total.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/library_counts_500hvg.csv')
sc.pp.scale(adata, max_value=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=499) # estimate only 2 PCs
X_new = pca.fit_transform(adata.X)
print('variance explained')
print(pca.explained_variance_ratio_)
print('pca.components_ shape ncomp x nfeat')
print()
df = pd.DataFrame(abs(pca.components_))
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print('done saving')
'''
    # sc.pp.scale(adata, max_value=10)  # zheng scales after the log, but this doesn't work well and is also not used in scvelo.pp.filter_and_normalize
return adata
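# Illustrative call (a sketch; `raw_counts_df` is a hypothetical cells x genes count table):
#
#     adata_hvg = adata_preprocess(sc.AnnData(raw_counts_df), n_top_genes=500, log=True)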
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
'''
df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print(df)
df = df.set_index('Unnamed: 0')
print(df)
df = df.sort_values(by='totals', axis=1, ascending = False)
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_sorted_500hvg.csv')
print('saved')
'''
import random
random.seed(100)
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)',
'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells cDCs
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
df_nover = pd.DataFrame(nover_labels)
# df_nover.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/noverLabelsforMonocle.csv')
print('save nover')
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
# palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw.X: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(ad.X)
print(ad.raw.X.shape)
# df_X = pd.DataFrame(ad.raw.X.todense(), columns = ad.var_names)
# df_X.columns = [i for i in ad.var_names]
# print('starting to save .X')
# df_X.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/expression_matrix_raw.csv")
print('finished save .X')
# (ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
adata_counts_raw = sc.AnnData(ad.raw.X)
adata_counts_raw.var_names = [i for i in ad.var_names]
# adata_counts_raw = adata_preprocess(adata_counts_raw, n_top_genes=500, log=True) # when using HVG and no PCA
# sc.tl.pca(adata_counts_raw,svd_solver='arpack', n_comps=ncomps)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = [
'ITGAX'] # ['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
# 'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
true_label = nover_labels # revised_clus
root_user = [4823]
print('v0 random seed', v0_random_seed)
# df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
# df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.X.todense()
print(time.ctime())
print(time.ctime())
v0 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.2,
root_user=root_user, dataset='humanCD34', preserve_disconnected=True, random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, pseudotime_threshold_TS=10,
neighboring_terminal_states_threshold=3) # *.4 root=1,
v0.run_VIA()
v0.make_JSON(filename='scRNA_Hema_temp.js')
super_labels = v0.labels
print('starting to save selected genes')
genes_save = ['ITGAX', 'GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA',
'ITGAX', 'IGHD',
'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
df_selected_genes = pd.DataFrame(adata_counts.X, columns=[cc for cc in adata_counts.var_names])
df_selected_genes = df_selected_genes[genes_save]
# df_selected_genes.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/selected_genes.csv")
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B', 'SPI1', 'CD34', 'CSF1R', 'ITGAX']
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=gene_list_magic)
df_magic_cluster = df_magic.copy()
df_magic_cluster['parc'] = v0.labels
df_magic_cluster = df_magic_cluster.groupby('parc', as_index=True).mean()
print('end magic', df_magic.shape)
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v0.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_magic_cluster['GATA1'].values, title='GATA1')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
draw_trajectory_gams(tsnem, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, v0.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=str(Xin.shape[1]))
plt.show()
print('super labels', set(super_labels))
ad.obs['via0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['via0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA', 'ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
sc.pl.matrixplot(magic_ad, marker_genes, groupby='via0_label', dendrogram=True)
'''
sc.tl.rank_genes_groups(ad, groupby='via0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="via0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='via0_label', n_genes = 3) # plot the result
'''
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34', 'GATA1', 'IL3RA']: # ,'SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
plt.show()
super_edges = v0.edgelist_maxout # v0.edgelist
tsi_list = get_loc_terminal_states(v0, Xin)
v1 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.95, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=v0.terminal_clusters, is_coarse=False, full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
random_seed=v0_random_seed, pseudotime_threshold_TS=10) # *.4super_terminal_cells = tsi_list #3root=1,
v1.run_VIA()
labels = v1.labels
v1.make_JSON(filename='scRNA_Hema_via1_temp.js')
df_magic_cluster = df_magic.copy()
df_magic_cluster['via1'] = v1.labels
df_magic_cluster = df_magic_cluster.groupby('via1', as_index=True).mean()
# print('df_magic_cluster', df_magic_cluster)
'''
#Get the clustsergraph gene expression on topology
for gene_i in gene_list_magic:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v1.draw_piechart_graph(ax,ax1,type_pt='gene', gene_exp = df_magic_cluster[gene_i].values, title = gene_i)
plt.show()
'''
ad.obs['parc1_label'] = [str(i) for i in labels]
'''
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in v1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = v0.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=v1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
# print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # v1.labels instaed of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34']: # ['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
# v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name] + 'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# graph_hnsw = v0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(adata_counts.obsm['X_pca'][:, 0:20])
# embedding = embedding[idx, :]
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
# DRAW EVOLUTION PATHS
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Toy_comparisons(ncomps=10, knn=30, random_seed=42, dataset='Toy3', root_user='M1',
foldername="/home/shobi/Trajectory/Datasets/Toy3/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
# root_user = ["T1_M1", "T2_M1"] # "M1" # #'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy3":
print('dataset Toy3')
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv",
delimiter=",")
#df_counts = pd.read_csv(foldername + "Toy3_noise_100genes_thinfactor8.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
print('df_ids', df_ids.columns)
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C107'
if dataset == "Toy4": # 2 disconnected components
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "Toy4_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
print(df_counts.shape, 'df_counts shape')
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T2_M1'
palantir_root = 'C107'
if dataset == "Connected":
df_counts = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000.csv", delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected/ToyConnected_M9_n2000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1'
if dataset == "Connected2":
df_counts = pd.read_csv(foldername + "Connected2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected2_noise_500genes.csv", 'rt',delimiter=",")
df_ids = pd.read_csv(foldername + "Connected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C11'
    # suggest using visual jaccard pruning of 1 (this doesn't alter the underlying graph, just the display); can also use "M2" as the starting root
if dataset == "ToyMultiM11":
df_counts = pd.read_csv(foldername + "Toymulti_M11_n3000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyMulti_M11_noised.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "Toymulti_M11_n3000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv( "/home/shobi/Trajectory/Datasets/ToyMultifurcating_M11/Toymulti_M11_n3000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1005'
if dataset == "Cyclic": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_noise_100genes_thinfactor3.csv",
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1'
if dataset == "Cyclic2": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/ToyCyclic2_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C107'
if dataset == 'Bifurc2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/ToyBifurc2_noised.csv", delimiter=",")
df_ids = pd.read_csv( "/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000_ids_with_truetime.csv",delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1006'
if dataset == 'Disconnected2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/ToyDisconnected2_noise_500genes.csv",
delimiter=",")
df_ids = pd.read_csv(
"/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv")
root_user = ['T1_M1', 'T1_M2', 'T1_M3'] # 'T1_M1'
paga_root = 'T1_M1'
palantir_root = 'C125'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
    df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
# df_ids.to_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000_ids_sorted_with_truetime.csv")
# df_counts['group_id'] = df_ids['group_id']#to split Toy4
# df_counts['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_ids['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_counts = df_counts[df_counts['main_Traj']=='T2']#to split Toy4
# df_ids = df_ids[df_ids['main_Traj'] == 'T2']#to split Toy4
#true_time = df_ids['true_time']
true_label = df_ids['group_id'].tolist()
# df_counts = df_counts.drop('main_Traj', 1)#to split Toy4
# df_counts = df_counts.drop('group_id', 1)#to split Toy4
# df_ids = df_ids.reset_index(drop=True)#to split Toy4
# df_counts = df_counts.reset_index(drop=True)#to split Toy4
# true_label = df_ids['group_id'] #to split Toy4
print("shape", df_counts.index, df_ids.index)
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# comparisons
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
do_paga = False #
do_palantir = False #
# comparisons
if do_paga == True:
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X', ) # n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
# sc.tl.diffmap(adata_counts, n_comps=ncomps)
sc.tl.diffmap(adata_counts, n_comps=200) # default retains n_comps = 15
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0, random_state=10)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['leiden','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
df_paga = pd.DataFrame()
df_paga['paga_dpt'] = adata_counts.obs['dpt_pseudotime'].values
correlation = df_paga['paga_dpt'].corr(df_ids['true_time'])
print('corr paga knn', knn, correlation)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
# X = df_counts.values
'''
# palantir
if do_palantir == True:
print(palantir.__file__) # location of palantir source code
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts # palantir.preprocess.normalize_counts(counts)
# pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps) #normally use
pca_projections = counts
dm_res = palantir.utils.run_diffusion_maps(pca_projections, knn=knn,
n_components=300) ## n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
# c107 for T1_M1, C42 for T2_M1 disconnected
# C1 for M8_connected, C1005 for multi_M11 , 'C1006 for bifurc2'
pr_res = palantir.core.run_palantir(ms_data, early_cell=palantir_root, num_waypoints=500, knn=knn)
df_palantir = pd.read_csv(
'/home/shobi/Trajectory/Datasets/Toy3/palantir_pt.csv') # /home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
pt = df_palantir['pt']
correlation = pt.corr(true_time)
print('corr Palantir', correlation)
print('')
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=pca_projections.shape[1])
plt.show()
'''
# from sklearn.decomposition import PCA
# pca = PCA(n_components=ncomps)
# pc = pca.fit_transform(df_counts)
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts.X
if dataset == 'Toy4':
jac_std_global = .15 # .15
else:
        jac_std_global = 0.15  # use 1 for Cyclic2, otherwise 0.15
#
v0 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
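    # Build an hnswlib approximate nearest-neighbour index over the first ncomps PCs so that
    # cluster-level locations can be mapped back to their closest single cell in PCA space.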
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
    tsi_list = []  # for each terminal cluster, find the single cell nearest to the average location (in PCA space) of its later-pseudotime cells
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
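    # tsi_list now holds one representative cell per terminal cluster (the cell closest to the mean
    # of that cluster's later-pseudotime half); these are passed as super_terminal_cells to the
    # fine-grained VIA run below.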
print('Granular VIA iteration')
v1 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via1 knn', knn, correlation)
labels = v1.labels
# v1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# v1.run_VIA()
# labels = v1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
        sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
        embedding = adata_counts.obsm['X_pca'][idx, 0:2]  # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = umap.UMAP().fit_transform(Xin) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
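    # sc_loc_ofsuperCluster_PCAspace appears to return, for each super-cluster, the location of a
    # representative single cell so the coarse cluster graph can be overlaid on the 2-D embedding below.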
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
'''
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(Xin.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
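    # Treat the first five PCs as surrogate 'genes' and plot their trend along the VIA pseudotime.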
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_i)
# knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Toy(ncomps=10, knn=30, random_seed=41, dataset='Toy3', root_user=['M1'],
cluster_graph_pruning_std=1., foldername="/home/shobi/Trajectory/Datasets/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
if dataset == "Toy3":
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['M1']
paga_root = "M1"
if dataset == "Toy4": # 2 disconnected components
print('inside toy4')
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T1_M1'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
# print("shape", df_counts.shape, df_ids.shape)
    df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
true_label = df_ids['group_id'].tolist()
#true_time = df_ids['true_time']
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# true_label =['a' for i in true_label] #testing dummy true_label
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
# via_wrapper(adata_counts, true_label, embedding= adata_counts.obsm['X_pca'][:,0:2], root=[1], knn=30, ncomps=10,cluster_graph_pruning_std = 1)
# print('starting via wrapper disconn')
# via_wrapper_disconnected(adata_counts, true_label, embedding=adata_counts.obsm['X_pca'][:, 0:2], root=[23,902], preserve_disconnected=True, knn=10, ncomps=10, cluster_graph_pruning_std=1 ,random_seed=41)
# print('end via wrapper disconn')
if dataset == 'Toy4':
jac_std_global = 0.15 # 1
else:
jac_std_global = 0.15
import umap
embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:, 0:10]) # 50
# embedding = adata_counts.obsm['X_pca'][:, 0:2]
# plt.scatter(embedding[:,0],embedding[:,1])
# plt.show()
print('root user', root_user)
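    # Two-pass VIA: the coarse pass (v0, too_big_factor=0.3) builds the cluster graph and finds
    # terminal states; the fine-grained pass (v1, too_big_factor=0.1) reuses v0's graph arrays and
    # terminal clusters to refine pseudotime and lineage paths.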
v0 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed, piegraph_arrow_head_width=0.4,
piegraph_edgeweight_scalingfactor=1.0) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
print('super labels', type(super_labels))
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
    tsi_list = []  # for each terminal cluster, find the single cell nearest to the average location (in PCA space) of its later-pseudotime cells
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
labels = v1.labels
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
n_downsample = 50
if len(labels) > n_downsample: # just testing the downsampling and indices. Not actually downsampling
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
        sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
embedding = embedding[idx, :]
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
# embedding = adata_counts.obsm['X_pca'][idx, 0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
'''
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax2.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
'''
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
for gene_i in ['Gene0', 'Gene1', 'Gene2']: # , 'Gene3', 'Gene4']:
subset_ = df_subset[gene_i].values
v1.get_gene_expression(subset_, title_gene=gene_i)
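    # Index the 2-D embedding with a knn graph so single-cell evolution paths can be projected onto it.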
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Bcell(ncomps=50, knn=20, random_seed=0, cluster_graph_pruning_std=.15,path='/home/shobi/Trajectory/Datasets/Bcell/'):
print('Input params: ncomp, knn, random seed', ncomps, knn, random_seed)
# https://github.com/STATegraData/STATegraData
def run_zheng_Bcell(adata, min_counts=3, n_top_genes=500, do_HVG=True):
sc.pp.filter_genes(adata, min_counts=min_counts)
# sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
'''
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
'''
sc.pp.normalize_total(adata, target_sum=1e4)
if do_HVG == True:
sc.pp.log1p(adata)
'''
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False )
adata = adata[:, filter_result.gene_subset] # subset the genes
'''
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, min_mean=0.0125, max_mean=3,
min_disp=0.5) # this function expects logarithmized data
print('len hvg ', sum(adata.var.highly_variable))
adata = adata[:, adata.var.highly_variable]
sc.pp.normalize_per_cell(adata) # renormalize after filtering
# if do_log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
if do_HVG == False: sc.pp.log1p(adata)
sc.pp.scale(adata, max_value=10) # scale to unit variance and shift to zero mean
return adata
'''
def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
ad = ad1.copy()
tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
norm_df_pal = pd.DataFrame(ad.X)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.columns = [i for i in ad.var_names]
# print('norm df', norm_df_pal)
norm_df_pal.index = new
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data shape: determined using eigengap', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(true_label, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
start_cell = 'c42' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=ncomps)
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Ldha', 'Foxo1', 'Lig4'] # , 'Slc7a5']#,'Slc7a5']#,'Sp7','Zfp629']
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, Bcell_marker_gene_list])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
'''
def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
# print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
adata_counts = adata_counts1.copy()
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
adata_counts.uns['iroot'] = 33 # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
print('dpt format', adata_counts.obs['dpt_pseudotime'])
plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
plt.title('PAGA DPT')
plt.show()
def find_time_Bcell(s):
start = s.find("Ik") + len("Ik")
end = s.find("h")
return int(s[start:end])
def find_cellID_Bcell(s):
start = s.find("h") + len("h")
end = s.find("_")
return s[start:end]
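    # Sample names are assumed to follow a pattern like 'Ik<time>h<ID>_...': the helpers above extract
    # the timepoint (the integer between "Ik" and "h") and the cell/replicate identifier (between "h" and "_").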
Bcell = pd.read_csv(path + 'genes_count_table.txt', sep='\t')
gene_name = pd.read_csv(path + 'genes_attr_table.txt', sep='\t')
Bcell_columns = [i for i in Bcell.columns]
adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
Bcell_columns.remove('tracking_id')
print(gene_name.shape, gene_name.columns)
Bcell['gene_short_name'] = gene_name['gene_short_name']
adata_counts.var_names = gene_name['gene_short_name']
adata_counts.obs['TimeCellID'] = Bcell_columns
time_list = [find_time_Bcell(s) for s in Bcell_columns]
print('time list set', set(time_list))
adata_counts.obs['TimeStamp'] = [str(tt) for tt in time_list]
ID_list = [find_cellID_Bcell(s) for s in Bcell_columns]
adata_counts.obs['group_id'] = [str(i) for i in time_list]
ID_dict = {}
color_dict = {}
for j, i in enumerate(list(set(ID_list))):
ID_dict.update({i: j})
print('timelist', list(set(time_list)))
for j, i in enumerate(list(set(time_list))):
color_dict.update({i: j})
print('shape of raw data', adata_counts.shape)
adata_counts_unfiltered = adata_counts.copy()
Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
small_large_gene_list = ['Kit', 'Pcna', 'Ptprc', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b', 'Mme',
'Spn']
list_var_names = [s for s in adata_counts_unfiltered.var_names]
matching = [s for s in list_var_names if "IgG" in s]
for gene_name in Bcell_marker_gene_list:
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
for gene_name in small_large_gene_list:
print('looking at small-big list')
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
# diff_list = [i for i in diff_list if i in list_var_names] #based on paper STable1 https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2006506#pbio.2006506.s007
# adata_counts = adata_counts[:,diff_list] #if using these, then set do-HVG to False
print('adata counts difflisted', adata_counts.shape)
adata_counts = run_zheng_Bcell(adata_counts, n_top_genes=5000, min_counts=30,
do_HVG=True) # 5000 for better ordering
print('adata counts shape', adata_counts.shape)
# sc.pp.recipe_zheng17(adata_counts)
# (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
marker_genes = {"small": ['Rag2', 'Rag1', 'Pcna', 'Myc', 'Ccnd2', 'Cdkn1a', 'Smad4', 'Smad3', 'Cdkn2a'],
# B220 = Ptprc, PCNA negative for non cycling
"large": ['Ighm', 'Kit', 'Ptprc', 'Cd19', 'Il2ra', 'Vpreb1', 'Cd24a', 'Igll1', 'Cd79a', 'Cd79b'],
"Pre-B2": ['Mme', 'Spn']} # 'Cd19','Cxcl13',,'Kit'
print('make the v0 matrix plot')
mplot_adata = adata_counts_unfiltered.copy() # mplot_adata is for heatmaps so that we keep all genes
mplot_adata = run_zheng_Bcell(mplot_adata, n_top_genes=25000, min_counts=1, do_HVG=False)
# mplot_adata.X[mplot_adata.X>10] =10
# mplot_adata.X[mplot_adata.X< -1] = -1
# sc.pl.matrixplot(mplot_adata, marker_genes, groupby='TimeStamp', dendrogram=True)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=200) # ncomps
# df_bcell_pc = pd.DataFrame(adata_counts.obsm['X_pca'])
# print('df_bcell_pc.shape',df_bcell_pc.shape)
# df_bcell_pc['time'] = [str(i) for i in time_list]
# df_bcell_pc.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PCs.csv')
# sc.pl.pca_variance_ratio(adata_counts, log=True)
jet = cm.get_cmap('viridis', len(set(time_list)))
cmap_ = jet(range(len(set(time_list))))
jet2 = cm.get_cmap('jet', len(set(ID_list)))
cmap2_ = jet2(range(len(set(ID_list))))
# color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
# sc.pl.heatmap(mplot_adata, var_names = small_large_gene_list,groupby = 'TimeStamp', dendrogram = True)
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(
adata_counts.obsm['X_pca'][:, 0:5])
df_umap = pd.DataFrame(embedding)
# df_umap.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_umap.csv')
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
for i in list(set(time_list)):
loc = np.where(np.asarray(time_list) == i)[0]
ax4.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
if i == 0:
for xx in range(len(loc)):
poss = loc[xx]
ax4.text(embedding[poss, 0], embedding[poss, 1], 'c' + str(xx))
ax4.legend()
ax1.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Pcna'].X.flatten(), alpha=1)
ax1.set_title('Pcna, cycling')
ax2.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Vpreb1'].X.flatten(), alpha=1)
ax2.set_title('Vpreb1')
ax3.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:, 'Cd24a'].X.flatten(), alpha=1)
ax3.set_title('Cd24a')
# ax2.text(embedding[i, 0], embedding[i, 1], str(i))
'''
for i, j in enumerate(list(set(ID_list))):
loc = np.where(np.asarray(ID_list) == j)
if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
'''
# plt.show()
true_label = time_list
# run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
#run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
print('input has shape', adata_counts.obsm['X_pca'].shape)
input_via = adata_counts.obsm['X_pca'][:, 0:ncomps]
df_input = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
df_annot = pd.DataFrame(['t' + str(i) for i in true_label])
# df_input.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_200PC_5000HVG.csv')
# df_annot.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_annots.csv')
root_user = [42]
v0 = VIA(input_via, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3, dataset='bcell',
cluster_graph_pruning_std=cluster_graph_pruning_std,
root_user=root_user, preserve_disconnected=True, random_seed=random_seed,
do_impute_bool=True) # *.4#root_user = 34
v0.run_VIA()
super_labels = v0.labels
tsi_list = get_loc_terminal_states(via0=v0, X_input=adata_counts.obsm['X_pca'][:, 0:ncomps])
v1 = VIA(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, is_coarse=False,
cluster_graph_pruning_std=cluster_graph_pruning_std,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
super_terminal_clusters=v0.terminal_clusters, random_seed=random_seed)
v1.run_VIA()
labels = v1.labels
super_edges = v0.edgelist
# plot gene expression vs. pseudotime
Bcell_marker_gene_list = ['Igll1', 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4', 'Sp7', 'Zfp629'] # irf4 down-up
df_ = pd.DataFrame(adata_counts_unfiltered.X) # no normalization, or scaling of the gene count values
df_.columns = [i for i in adata_counts_unfiltered.var_names]
df_Bcell_marker = df_[Bcell_marker_gene_list]
print(df_Bcell_marker.shape, 'df_Bcell_marker.shape')
df_Bcell_marker.to_csv('/home/shobi/Trajectory/Datasets/Bcell/Bcell_markergenes.csv')
    # v0 is run with do_impute_bool=True, hence it stores the full graph (in subsequent iterations we don't recompute and store the full unpruned knn graph)
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=Bcell_marker_gene_list)
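    # df_magic holds graph-smoothed (imputed) expression values for the marker genes; each is
    # plotted against the single-cell pseudotime below.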
for gene_name in Bcell_marker_gene_list:
# loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
subset_ = df_magic[gene_name].values
v1.get_gene_expression(subset_, title_gene=gene_name)
# magic_ad = adata_counts_unfiltered.X[:, loc_gata]
# v1.get_gene_expression(magic_ad, gene_name)
n_downsample = 100
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
# idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
idx = np.arange(0, len(labels))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list((np.asarray(true_label)[idx]))
        sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
graph_embedding = v0.knngraph_visual(input_via[idx, 0:5], knn_umap=10, downsampled=True)
embedding_hnsw = v0.run_umap_hnsw(input_via[idx, 0:5], graph_embedding)
# embedding = embedding_hnsw
# loc0 = np.where(np.asarray(true_label)==0)[0]
# for item in loc0:
# print(item, 'at', embedding[item,:])
embedding = embedding[idx, :]
print('tsne downsampled size', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.arange(0, len(labels)) # np.random.randint(len(labels), size=len(labels))
sc_pt_markov = v1.single_cell_pt_markov
# embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx,
adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def plot_EB():
# genes along lineage cluster path
df_groupby_p1 = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20_allgenes.csv')
path_clusters = [43, 38, 42, 56, 7,
3] # NC[43,41,16,2,3,6]#SMP[43,41,16,14,11,18]#C[43,41,16,14,12,15]#NS3[43,38,42,56,7,3]
target = "NS 3" # 'NC 6' #'SMP 18'#' Cardiac 15'
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'], 'NS': ['LHX2', 'NR2F1', 'DMRT3', 'LMX1A',
# 'KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1','PAX6', 'ZBTB16','NPAS1', 'SOX1'
'NKX2-8', 'EN2'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'],
'Pre-NE': ['POU5F1', 'OTX2'], 'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6'],
# 'OLIG3','HOXD1', 'ZIC2', 'ZIC5','HOXA2','HOXB2'
'ESC': ['NANOG', 'POU5F1'], 'Pre-NE': ['POU5F1', 'OTX2'], 'Lat-ME': ['TBX5', 'HOXD9', 'MYC']}
relevant_genes = []
relevant_keys = ['ESC', 'Pre-NE', 'NE', 'NP',
'NS'] # NC['ESC', 'Pre-NE', 'NE', 'NC']#SMP['ESC','PS/ME','Lat-ME','SMP']#NS['ESC', 'Pre-NE', 'NE', 'NP', 'NS']
dict_subset = {key: value for key, value in marker_genes_dict.items() if key in relevant_keys}
print('dict subset', dict_subset)
for key in relevant_keys:
relevant_genes.append(marker_genes_dict[key])
relevant_genes = [item for sublist in relevant_genes for item in sublist]
print(relevant_genes)
df_groupby_p1 = df_groupby_p1.set_index('parc1')
df_groupby_p1 = df_groupby_p1.loc[path_clusters]
df_groupby_p1 = df_groupby_p1[relevant_genes]
df_groupby_p1 = df_groupby_p1.transpose()
# print( df_groupby_p1.head)
# print(df_groupby_p1)
ax = sns.heatmap(df_groupby_p1, vmin=-1, vmax=1, yticklabels=True)
ax.set_title('target ' + str(target))
plt.show()
# df_groupby_p1 = pd.concat([df_groupby_p1,df_groupby_p1])
# adata = sc.AnnData(df_groupby_p1)
# adata.var_names = df_groupby_p1.columns
# print(adata.var_names)
# adata.obs['parc1'] = ['43','38','42','56','7','3','43','38','42','56','7','3']
# print(adata.obs['parc1'])
# sc.pl.matrixplot(adata, dict_subset, groupby='parc1', vmax=1, vmin=-1, dendrogram=False)
def main_EB_clean(ncomps=30, knn=20, v0_random_seed=24, cluster_graph_pruning_std=.15,
foldername='/home/shobi/Trajectory/Datasets/EB_Phate/'):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2'], 'Pre-NE': ['POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# TI_pcs = pd.read_csv(foldername+'PCA_TI_200_final.csv')
# TI_pcs is PCA run on data that has been: filtered (remove cells with too large or small library count - can directly use all cells in EBdata.mat), library normed, sqrt transform, scaled to unit variance/zero mean
# TI_pcs = TI_pcs.values[:, 1:]
from scipy.io import loadmat
annots = loadmat(
foldername + 'EBdata.mat') # has been filtered but not yet normed (by library size) nor other subsequent pre-processing steps
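    # The .mat file provides the filtered counts matrix ('data'), per-cell timepoint labels ('cells')
    # and gene names ('EBgenes_name').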
# print('annots', annots)
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
# df_timelabels = pd.DataFrame(time_labels, columns=['true_time_labels'])
# df_timelabels.to_csv(foldername+'EB_true_time_labels.csv')
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
adata = sc.AnnData(data)
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = ['Day' + str(i) for i in time_labels]
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize by library after filtering
    adata.X = np.sqrt(adata.X)  # follow the PHATE paper, which uses a sqrt() transform instead of log1p()
Y_phate = pd.read_csv(foldername + 'EB_phate_embedding.csv')
Y_phate = Y_phate.values
# phate_operator = phate.PHATE(n_jobs=-1)
# Y_phate = phate_operator.fit_transform(adata.X) # before scaling. as done in PHATE
    scale = False  # scaling mostly improves the cluster-graph heatmap of genes vs clusters; it doesn't sway VIA performance
if scale == True: # we scale before VIA. scaling not needed for PHATE
print('pp scaled')
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state=0)
# adata.obsm['X_pca'] = TI_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
print('do v0')
root_user = [1]
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
tsi_list = get_loc_terminal_states(v0, input_data)
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
cluster_graph_pruning_std=cluster_graph_pruning_std,
too_big_factor=v1_too_big, super_cluster_labels=v0.labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
full_neighbor_array=v0.full_neighbor_array,
full_distance_array=v0.full_distance_array, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=21)
v1.run_VIA()
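    # Side-by-side PHATE embedding coloured by annotated day vs. VIA pseudotime.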
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(Y_phate[:, 0], Y_phate[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(Y_phate[:, 0], Y_phate[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
ax1.set_title('Embryoid: Annotated Days')
ax2.set_title('Embryoid VIA Pseudotime (Randomseed' + str(v0_random_seed) + ')')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(v1.labels)))
draw_trajectory_gams(Y_phate, super_clus_ds_PCA_loc, v1.labels, v0.labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times and Paths', ncomp=ncomps)
knn_hnsw = make_knn_embeddedspace(Y_phate)
draw_sc_evolution_trajectory_dijkstra(v1, Y_phate, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
adata.obs['via0'] = [str(i) for i in v0.labels]
adata.obs['parc1'] = [str(i) for i in v1.labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in v1.labels]
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X,
axis=0) # to improve scale of the matrix plot we will scale
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True, figsize=[20, 10])
def main_EB(ncomps=30, knn=20, v0_random_seed=24):
marker_genes_dict = {'Hermang': ['TAL1', 'HOXB4', 'SOX17', 'CD34', 'PECAM1'],
'NP': ['NES', 'MAP2'],
'NS': ['KLF7', 'ISL1', 'DLX1', 'ONECUT1', 'ONECUT2', 'OLIG1', 'NPAS1', 'LHX2', 'NR2F1',
'NPAS1', 'DMRT3', 'LMX1A',
'NKX2-8', 'EN2', 'SOX1', 'PAX6', 'ZBTB16'], 'NC': ['PAX3', 'FOXD3', 'SOX9', 'SOX10'],
'PostEn': ['CDX2', 'ASCL2', 'KLF5', 'NKX2-1'],
'EN': ['ARID3A', 'GATA3', 'SATB1', 'SOX15', 'SOX17', 'FOXA2'], 'Pre-NE': ['POU5F1', 'OTX2'],
'SMP': ['TBX18', 'SIX2', 'TBX15', 'PDGFRA'],
'Cardiac': ['TNNT2', 'HAND1', 'F3', 'CD82', 'LIFR'],
'EpiCard': ['WT1', 'TBX5', 'HOXD9', 'MYC', 'LOX'],
'PS/ME': ['T', 'EOMES', 'MIXL1', 'CER1', 'SATB1'],
'NE': ['GBX2', 'OLIG3', 'HOXD1', 'ZIC2', 'ZIC5', 'GLI3', 'LHX2', 'LHX5', 'SIX3', 'SIX6',
'HOXA2', 'HOXB2'], 'ESC': ['NANOG', 'POU5F1', 'OTX2'], 'Pre-NE': ['POU5F1', 'OTX2']}
marker_genes_list = []
for key in marker_genes_dict:
for item in marker_genes_dict[key]:
marker_genes_list.append(item)
v0_too_big = 0.3
v1_too_big = 0.05
root_user = 1
n_var_genes = 'no filtering for HVG' # 15000
print('ncomps, knn, n_var_genes, v0big, p1big, randomseed, time', ncomps, knn, n_var_genes, v0_too_big, v1_too_big,
v0_random_seed, time.ctime())
# data = data.drop(['Unnamed: 0'], axis=1)
TI_pcs = pd.read_csv(
'/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_final.csv') # filtered, library normed, sqrt transform, scaled to unit variance/zero mean
TI_pcs = TI_pcs.values[:, 1:]
umap_pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
umap_pcs = umap_pcs.values[:, 1:]
# print('TI PC shape', TI_pcs.shape)
from scipy.io import loadmat
annots = loadmat(
        '/home/shobi/Trajectory/Datasets/EB_Phate/EBdata.mat')  # has been filtered but not yet normed (by library size)
data = annots['data'].toarray() # (16825, 17580) (cells and genes have been filtered)
# print('data min max', np.max(data), np.min(data), data[1, 0:20], data[5, 250:270], data[1000, 15000:15050])
# loc_ = np.where((data < 1) & (data > 0))
temp = data[(data < 1) & (data > 0)]
# print('temp non int', temp)
time_labels = annots['cells'].flatten().tolist()
import scprep
dict_labels = {'Day 00-03': 0, 'Day 06-09': 2, 'Day 12-15': 4, 'Day 18-21': 6, 'Day 24-27': 8}
# print(annots.keys()) # (['__header__', '__version__', '__globals__', 'EBgenes_name', 'cells', 'data'])
gene_names_raw = annots['EBgenes_name'] # (17580, 1) genes
print(data.shape)
adata = sc.AnnData(data)
# time_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/labels_1.csv')
# time_labels = time_labels.drop(['Unnamed: 0'], axis=1)
# time_labels = time_labels['time']
# adata.obs['time'] = [str(i) for i in time_labels]
gene_names = []
for i in gene_names_raw:
gene_names.append(i[0][0])
adata.var_names = gene_names
adata.obs['time'] = [str(i) for i in time_labels]
# filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=5000, log=False) #dont take log
adata_umap = adata.copy()
# adata = adata[:, filter_result.gene_subset] # subset the genes
# sc.pp.normalize_per_cell(adata, min_counts=2) # renormalize after filtering
print('data max min BEFORE NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
rowsums = adata.X.sum(axis=1)
# adata.X = adata.X / rowsums[:, np.newaxis]
# adata.X = sc.pp.normalize_total(adata, exclude_highly_expressed=True, max_fraction=0.05, inplace=False)['X'] #normalize after filtering
adata.X = sc.pp.normalize_total(adata, inplace=False)['X'] # normalize after filtering
print('data max min after NORM', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
    adata.X = np.sqrt(adata.X)  # follow the PHATE paper, which uses a sqrt() transform instead of log1p()
adata_umap.X = np.sqrt(adata_umap.X)
print('data max min after SQRT', np.max(adata.X), np.min(adata.X), adata.X[1, 0:20])
# sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
phate_operator = phate.PHATE(n_jobs=-1)
Y_phate = phate_operator.fit_transform(adata.X)
scprep.plot.scatter2d(Y_phate, c=time_labels, figsize=(12, 8), cmap="Spectral",
ticks=False, label_prefix="PHATE")
plt.show()
'''
Y_phate = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/EB_phate_embedding.csv')
Y_phate = Y_phate.values
scale = True
if scale == True:
print('pp scaled')
# sc.pp.scale(adata)
adata.X = (adata.X - np.mean(adata.X, axis=0)) / np.std(adata.X, axis=0)
sc.pp.scale(adata_umap)
print('data max min after SCALED', np.max(adata.X), np.min(adata.X))
else:
print('not pp scaled')
print('sqrt transformed')
# sc.pp.recipe_zheng17(adata, n_top_genes=15000) #expects non-log data
# g = sc.tl.rank_genes_groups(adata, groupby='time', use_raw=True, n_genes=10)#method='t-test_overestim_var'
# sc.pl.rank_genes_groups_heatmap(adata, n_genes=3, standard_scale='var')
'''
pcs = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_200_matlab.csv')
pcs = pcs.drop(['Unnamed: 0'], axis=1)
pcs = pcs.values
print(time.ctime())
ncomps = 50
input_data =pcs[:, 0:ncomps]
'''
print('v0_toobig, p1_toobig, v0randomseed', v0_too_big, v1_too_big, v0_random_seed)
print('do pca')
# sc.tl.pca(adata, svd_solver='arpack', n_comps=200, random_state = 0)
# sc.tl.pca(adata_umap, svd_solver='arpack', n_comps=200)
# df_pca_TI_200 = pd.DataFrame(adata.obsm['X_pca'])
# df_pca_TI_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_TI_200_TuesAM.csv')
# df_pca_umap_200 = pd.DataFrame(adata_umap.obsm['X_pca'])
# df_pca_umap_200.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/PCA_umap_200_TuesAM.csv')
adata.obsm['X_pca'] = TI_pcs
adata_umap.obsm['X_pca'] = umap_pcs
input_data = adata.obsm['X_pca'][:, 0:ncomps]
'''
#plot genes vs clusters for each trajectory
df_plot_gene = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_plot_gene = df_plot_gene[marker_genes_list]
previous_p1_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
title_str = 'Terminal state 27 (Cardiac)'
gene_groups = ['ESC', 'PS/ME','EN','Cardiac']
clusters = [43,41,16,14,12,27]
'''
u_knn = 15
repulsion_strength = 1
n_pcs = 10
print('knn and repel', u_knn, repulsion_strength)
U = pd.read_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
U = U.values[:, 1:]
U = Y_phate
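    # Use the precomputed PHATE embedding as the visualization coordinates (this overrides the UMAP
    # coordinates loaded from CSV above).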
# U = umap.UMAP(n_neighbors=u_knn, random_state=1, repulsion_strength=repulsion_strength).fit_transform(adata_umap.obsm['X_pca'][:, 0:n_pcs])
#print('start palantir', time.ctime())
# run_palantir_EB(adata, knn=knn, ncomps=ncomps, tsne=U, str_true_label=[str(i) for i in time_labels])
#print('end palantir', time.ctime())
# df_U = pd.DataFrame(U)
# df_U.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/umap_pc10_knn15.csv')
print('do v0')
v0 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v0_too_big, root_user=root_user, dataset='EB', random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, preserve_disconnected=True) # *.4 root=1,
v0.run_VIA()
super_labels = v0.labels
v0_labels_df = pd.DataFrame(super_labels, columns=['v0_labels'])
v0_labels_df.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/p0_labels.csv')
adata.obs['via0'] = [str(i) for i in super_labels]
'''
df_temp1 = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
df_temp1 = df_temp1[marker_genes_list]
df_temp1['via0']=[str(i) for i in super_labels]
df_temp1 = df_temp1.groupby('via0').mean()
'''
# sns.clustermap(df_temp1, vmin=-1, vmax=1,xticklabels=True, yticklabels=True, row_cluster= False, col_cluster=True)
# sc.pl.matrixplot(adata, marker_genes_dict, groupby='via0', vmax=1, vmin =-1, dendrogram=True)
'''
sc.tl.rank_genes_groups(adata, groupby='via0', use_raw=True,
method='t-test_overestim_var', n_genes=5) # compute differential expression
sc.pl.rank_genes_groups_heatmap(adata, groupby='via0',vmin=-3, vmax=3) # plot the result
'''
p = hnswlib.Index(space='l2', dim=input_data.shape[1])
p.init_index(max_elements=input_data.shape[0], ef_construction=100, M=16)
p.add_items(input_data)
p.set_ef(30)
tsi_list = get_loc_terminal_states(v0, input_data)
v1 = VIA(input_data, time_labels, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=v1_too_big, is_coarse=False,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, ig_full_graph=v0.ig_full_graph,
csr_array_locally_pruned=v0.csr_array_locally_pruned, full_distance_array=v0.full_distance_array,
full_neighbor_array=v0.full_neighbor_array,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='EB',
super_terminal_clusters=v0.terminal_clusters, random_seed=v0_random_seed)
v1.run_VIA()
# adata.obs['parc1'] = [str(i) for i in v1.labels]
# sc.pl.matrixplot(adata, marker_genes, groupby='parc1', dendrogram=True)
labels = v1.labels
'''
df_labels = pd.DataFrame({'v0_labels':v0.labels,'p1_labels':v1.labels})
df_labels['sub_TS'] = [1 if i in v1.terminal_clusters else 0 for i in v1.labels]
df_labels['super_TS'] = [1 if i in v0.terminal_clusters else 0 for i in v0.labels]
df_labels.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_labels_knn20_pc100_seed20.csv')
df_temp2 = pd.DataFrame(adata.X, columns=[i for i in adata.var_names])
df_temp2 = df_temp2[marker_genes_list]
df_temp2['parc1'] = [str(i) for i in labels]
df_temp2 = df_temp2.groupby('parc1').mean()
df_temp2.to_csv('/home/shobi/Trajectory/Datasets/EB_Phate/df_groupbyParc1_knn20_pc100_seed20.csv')
'''
adata.obs['parc1'] = [str(i) for i in labels]
# df_ts = pd.DataFrame(adata.X, columns = [i for i in adata.var_names])
# df_ts = df_ts[marker_genes_list]
# df_ts['parc1'] = [str(i) for i in labels]
adata.obs['terminal_state'] = ['True' if i in v1.terminal_clusters else 'False' for i in labels]
# df_ts = df_ts[df_ts['terminal_state']=='True']
adata_TS = adata[adata.obs['terminal_state'] == 'True']
# sns.clustermap(df_temp1, vmin=-1, vmax=1, xticklabels=True, yticklabels=True, row_cluster=False, col_cluster=True)
sc.pl.matrixplot(adata, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# sc.pl.matrixplot(adata_TS, marker_genes_dict, groupby='parc1', vmax=1, vmin=-1, dendrogram=True)
# U = umap.UMAP(n_neighbors=10, random_state=0, repulsion_strength=repulsion_strength).fit_transform(input_data[:, 0:n_pcs])
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(U[:, 0], U[:, 1], c=time_labels, s=5, cmap='viridis', alpha=0.5)
ax2.scatter(U[:, 0], U[:, 1], c=v1.single_cell_pt_markov, s=5, cmap='viridis', alpha=0.5)
plt.title('repulsion and knn and pcs ' + str(repulsion_strength) + ' ' + str(u_knn) + ' ' + str(
n_pcs) + ' randseed' + str(v0_random_seed))
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, np.arange(0, len(labels)))
draw_trajectory_gams(U, super_clus_ds_PCA_loc, labels, super_labels, v0.edgelist_maxout,
v1.x_lazy, v1.alpha_teleport, v1.single_cell_pt_markov, time_labels, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
knn_hnsw = make_knn_embeddedspace(U)
draw_sc_evolution_trajectory_dijkstra(v1, U, knn_hnsw, v0.full_graph_shortpath,
idx=np.arange(0, input_data.shape[0]))
plt.show()
def main_mESC(knn=30, v0_random_seed=42, cluster_graph_pruning_std=.0, run_palantir_func=False):
import random
rand_str = 950 # random.randint(1, 999)
print('rand string', rand_str)
print('knn', knn)
data_random_seed = 20
root = '0.0'
type_germ = 'Meso'
normalize = True
data = pd.read_csv('/home/shobi/Trajectory/Datasets/mESC/mESC_' + type_germ + '_markers.csv')
print('counts', data.groupby('day').count())
# print(data.head())
print(data.shape)
n_sub = 7000
print('type,', type_germ, 'nelements', n_sub, 'v0 randseed', v0_random_seed)
title_string = 'randstr:' + str(rand_str) + ' Knn' + str(knn) + ' nelements:' + str(n_sub) + ' ' + 'meso'
# data = data[data['day']!=0]
v0_too_big = 0.3
p1_too_big = 0.15 # .15
print('v0 and p1 too big', v0_too_big, p1_too_big)
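    # Sub-sample the day-0 cells to at most n_sub cells using a fixed seed for reproducibility.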
data_sub = data[data['day'] == 0.0]
np.random.seed(data_random_seed)
idx_sub = np.random.choice(a=np.arange(0, data_sub.shape[0]), size=min(n_sub, data_sub.shape[0]), replace=False,
p=None) # len(true_label)
data_sub = data_sub.values[idx_sub, :]
    data_sub = pd.DataFrame(data_sub, columns=data.columns)  # api: pandas.DataFrame
# authors: <NAME>, <NAME>, <NAME>
# date: 2020-01-25
'''The script loads previously trained model and performs validation on test data. It then
stores sample excerpt in data folder
Usage: test_model.py [--TEST_FILE_PATH=<TEST_FILE_PATH>] [--MODEL_DUMP_PATH=<MODEL_DUMP_PATH>] [--TEST_SIZE=<TEST_SIZE>]
Options:
--TEST_FILE_PATH=<TEST_FILE_PATH> Test data file path [default: data/vehicles_test.csv]
--MODEL_DUMP_PATH=<MODEL_DUMP_PATH> Path to load the model to test. [default: results/model.pic]
--TEST_SIZE=<TEST_SIZE> Percentage of test set to use (from 0 to 1). [default: 1]
'''
import numpy as np
import pandas as pd
import altair as alt
from sklearn import tree
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn import linear_model
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC, SVR, LinearSVC
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.model_selection import KFold
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import mean_squared_error
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import VotingClassifier, AdaBoostClassifier, GradientBoostingClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.metrics import mean_absolute_error
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pickle
import os
import sys
from docopt import docopt
opt = docopt(__doc__)
# Excerpt for the report path
TEST_EXCERPT_PATH = 'results/test_results_sample.csv'
def main(test_file_path, model_dump_path, test_size=1):
"""
Main entry for script to load data and test the model.
Arguments
---------
test_file_path : str
Path to test dataset.
model_dump_path : str
Path to dump the resulting model.
test_size : float
Percentage of test dataset to use, from 0 to 1 (Default = 1)
"""
if (not os.path.isfile(test_file_path)):
print('ERROR - No test file. Run scripts/wrangling.R first')
sys.exit()
if (not os.path.isfile(model_dump_path)):
print('ERROR - No model file. Run scripts/train_model.py first')
sys.exit()
if test_size <= 0 or test_size > 1:
print('ERROR - Invalid TEST_SIZE. Should be between 0 and 1')
sys.exit()
# Read the model from Pickle dump
print("Loading model...")
model = pickle.load(open(model_dump_path, 'rb'))
# Read master test data
print("Loading test data...")
    test_data = pd.read_csv(test_file_path)
"""
Tests for statistical pipeline terms.
"""
from numpy import (
arange,
full,
full_like,
nan,
where,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
Timestamp,
)
from pandas.util.testing import assert_frame_equal
from scipy.stats import linregress, pearsonr, spearmanr
from catalyst.assets import Equity
from catalyst.errors import IncompatibleTerms, NonExistentAssetInTimeFrame
from catalyst.pipeline import CustomFactor, Pipeline
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.data.testing import TestingDataSet
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors.equity import (
Returns,
RollingLinearRegressionOfReturns,
RollingPearsonOfReturns,
RollingSpearmanOfReturns,
)
from catalyst.pipeline.loaders.frame import DataFrameLoader
from catalyst.pipeline.sentinels import NotSpecified
from catalyst.testing import (
AssetID,
AssetIDPlusDay,
check_arrays,
make_alternating_boolean_array,
make_cascading_boolean_array,
parameter_space,
)
from catalyst.testing.fixtures import (
WithSeededRandomPipelineEngine,
WithTradingEnvironment,
CatalystTestCase,
)
from catalyst.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
)
class StatisticalBuiltInsTestCase(WithTradingEnvironment, CatalystTestCase):
sids = ASSET_FINDER_EQUITY_SIDS = Int64Index([1, 2, 3])
START_DATE = Timestamp('2015-01-31', tz='UTC')
END_DATE = Timestamp('2015-03-01', tz='UTC')
@classmethod
def init_class_fixtures(cls):
super(StatisticalBuiltInsTestCase, cls).init_class_fixtures()
day = cls.trading_calendar.day
cls.dates = dates = date_range(
'2015-02-01', '2015-02-28', freq=day, tz='UTC',
)
        # Using these start and end dates because they are a contiguous span of
# 5 days (Monday - Friday) and they allow for plenty of days to look
# back on when computing correlations and regressions.
cls.start_date_index = start_date_index = 14
cls.end_date_index = end_date_index = 18
cls.pipeline_start_date = dates[start_date_index]
cls.pipeline_end_date = dates[end_date_index]
cls.num_days = num_days = end_date_index - start_date_index + 1
sids = cls.sids
cls.assets = assets = cls.asset_finder.retrieve_all(sids)
cls.my_asset_column = my_asset_column = 0
cls.my_asset = assets[my_asset_column]
cls.num_assets = num_assets = len(assets)
cls.raw_data = raw_data = DataFrame(
data=arange(len(dates) * len(sids), dtype=float64_dtype).reshape(
len(dates), len(sids),
),
index=dates,
columns=assets,
)
# Using mock 'close' data here because the correlation and regression
# built-ins use USEquityPricing.close as the input to their `Returns`
# factors. Since there is no way to change that when constructing an
# instance of these built-ins, we need to test with mock 'close' data
# to most accurately reflect their true behavior and results.
close_loader = DataFrameLoader(USEquityPricing.close, raw_data)
cls.run_pipeline = SimplePipelineEngine(
{USEquityPricing.close: close_loader}.__getitem__,
dates,
cls.asset_finder,
).run_pipeline
cls.cascading_mask = \
AssetIDPlusDay() < (sids[-1] + dates[start_date_index].day)
cls.expected_cascading_mask_result = make_cascading_boolean_array(
shape=(num_days, num_assets),
)
cls.alternating_mask = (AssetIDPlusDay() % 2).eq(0)
cls.expected_alternating_mask_result = make_alternating_boolean_array(
shape=(num_days, num_assets),
)
cls.expected_no_mask_result = full(
shape=(num_days, num_assets), fill_value=True, dtype=bool_dtype,
)
@parameter_space(returns_length=[2, 3], correlation_length=[3, 4])
def _test_correlation_factors(self, returns_length, correlation_length):
"""
Tests for the built-in factors `RollingPearsonOfReturns` and
`RollingSpearmanOfReturns`.
"""
assets = self.assets
my_asset = self.my_asset
my_asset_column = self.my_asset_column
dates = self.dates
start_date = self.pipeline_start_date
end_date = self.pipeline_end_date
start_date_index = self.start_date_index
end_date_index = self.end_date_index
num_days = self.num_days
run_pipeline = self.run_pipeline
returns = Returns(window_length=returns_length)
masks = (self.cascading_mask, self.alternating_mask, NotSpecified)
expected_mask_results = (
self.expected_cascading_mask_result,
self.expected_alternating_mask_result,
self.expected_no_mask_result,
)
for mask, expected_mask in zip(masks, expected_mask_results):
pearson_factor = RollingPearsonOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
spearman_factor = RollingSpearmanOfReturns(
target=my_asset,
returns_length=returns_length,
correlation_length=correlation_length,
mask=mask,
)
columns = {
'pearson_factor': pearson_factor,
'spearman_factor': spearman_factor,
}
pipeline = Pipeline(columns=columns)
if mask is not NotSpecified:
pipeline.add(mask, 'mask')
results = run_pipeline(pipeline, start_date, end_date)
pearson_results = results['pearson_factor'].unstack()
spearman_results = results['spearman_factor'].unstack()
if mask is not NotSpecified:
mask_results = results['mask'].unstack()
check_arrays(mask_results.values, expected_mask)
# Run a separate pipeline that calculates returns starting
# (correlation_length - 1) days prior to our start date. This is
# because we need (correlation_length - 1) extra days of returns to
# compute our expected correlations.
results = run_pipeline(
Pipeline(columns={'returns': returns}),
dates[start_date_index - (correlation_length - 1)],
dates[end_date_index],
)
returns_results = results['returns'].unstack()
# On each day, calculate the expected correlation coefficients
# between the asset we are interested in and each other asset. Each
# correlation is calculated over `correlation_length` days.
expected_pearson_results = full_like(pearson_results, nan)
expected_spearman_results = full_like(spearman_results, nan)
for day in range(num_days):
todays_returns = returns_results.iloc[
day:day + correlation_length
]
my_asset_returns = todays_returns.iloc[:, my_asset_column]
for asset, other_asset_returns in todays_returns.iteritems():
asset_column = int(asset) - 1
expected_pearson_results[day, asset_column] = pearsonr(
my_asset_returns, other_asset_returns,
)[0]
expected_spearman_results[day, asset_column] = spearmanr(
my_asset_returns, other_asset_returns,
)[0]
expected_pearson_results = DataFrame(
data=where(expected_mask, expected_pearson_results, nan),
index=dates[start_date_index:end_date_index + 1],
columns=assets,
)
            assert_frame_equal(pearson_results, expected_pearson_results)
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
        tm.assert_sp_array_equal(result, expected)
import pandas as pd
import numpy as np
import pdb
import sys
import os
from sklearn.ensemble import GradientBoostingRegressor
from joblib import dump, load
import re
###################################################################
# (Sept 2020 - Jared) - PG-MTL training script on 145 source lakes
# Features and hyperparameters must be manually specified below
# (e.g. feats = ['dif_max_depth', ....]; n_estimators = 5500, etc)
###################################################################
#file to save model to
save_file_path = '../../models/metamodel_pgdl_RMSE_GBR.joblib'
#########################################################################################
#paste features found in "pbmtl_feature_selection.py" here
feats = ['n_obs_sp', 'n_obs_su', 'dif_max_depth', 'dif_surface_area',
'dif_glm_strat_perc', 'perc_dif_max_depth', 'perc_dif_surface_area',
'perc_dif_sqrt_surface_area']
###################################################################################
#######################################################################
#paste hyperparameters found in "pbmtl_hyperparameter_search.py" here
#
n_estimators = 5500
lr = .05
#####################################################################
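# A hedged sketch (not verbatim from this script) of how these settings are
# typically consumed once train_df is assembled below, assuming the metatarget
# column is the per-source 'rmse':
#   model = GradientBoostingRegressor(n_estimators=n_estimators, learning_rate=lr)
#   model.fit(train_df[feats], train_df['rmse'])
#   dump(model, save_file_path)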
ids = pd.read_csv('../../metadata/pball_site_ids.csv', header=None)
ids = ids[0].values
glm_all_f = pd.read_csv("../../results/glm_transfer/RMSE_transfer_glm_pball.csv")
train_lakes = [re.search('nhdhr_(.*)', x).group(1) for x in np.unique(glm_all_f['target_id'].values)]
train_lakes_wp = np.unique(glm_all_f['target_id'].values) #with prefix
#compile training data
train_df = pd.DataFrame()
for _, lake_id in enumerate(train_lakes):
new_df = pd.DataFrame()
#get performance results (metatargets), filter out target as source
lake_df_res = pd.read_csv("../../results/transfer_learning/target_"+lake_id+"/resultsPGRNNbasic_pball",header=None,names=['source_id','rmse'])
lake_df_res = lake_df_res[lake_df_res.source_id != 'source_id']
#get metadata differences between target and all the sources
    lake_df = pd.read_feather("../../metadata/diffs/target_nhdhr_"+lake_id+".feather")
import pandas as pd
import re
import ijson
import json
import numpy as np
import csv
class jsonData:
percent_critical = 0
percent_high = 0
percent_medium = 0
percent_low = 0
total = 0
def __init__(self):
pass
def print_full(self, x): # function that prints full dataframe for display/debugging purposes
pd.set_option('display.max_rows', len(x))
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
pd.set_option('display.float_format', '{:20,.2f}'.format)
pd.set_option('display.max_colwidth', -1)
print(x)
pd.reset_option('display.max_rows')
pd.reset_option('display.max_columns')
pd.reset_option('display.width')
pd.reset_option('display.float_format')
pd.reset_option('display.max_colwidth')
def flatten_json(self, y): # function to flatten jsons
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
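        # Illustrative example (assumed input, not from the scraped data):
        #   self.flatten_json({'a': 1, 'b': {'c': 2}, 'd': [3, 4]})
        #   -> {'a': 1, 'b_c': 2, 'd_0': 3, 'd_1': 4}
        # Nested keys are joined with underscores; list elements get positional indices.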
def search_dicts(self, key, list_of_dicts):
for item in list_of_dicts:
if key in item.keys():
return item
    def json_to_dataframe(self, filename):  # load a JSON file object, flatten it, and build a pandas dataframe
jsonObj = json.load(filename)
flat = self.flatten_json(jsonObj)
        results = pd.DataFrame()
import os
from os.path import join
import numpy as np
import pandas as pd
from collections import OrderedDict
from itertools import chain
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from ukbb_variables import (brain_dmri_fa, brain_dmri_icvf,
brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3,
brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus,
fluid_intelligence, neuroticism,
education, primary_demographics,
lifestyle, mental_health, earlylife)
path_to_csv = '/storage/local/kdadi/work/rs_study/experiments/UKBB/ukb9543.csv'
path_to_matrices = '/storage/local/kdadi/work/data/UKBB/rfMRI_tangent_matrix_dim100'
path_to_merge_brain = '/storage/local/kdadi/work/rs_study/experiments/UKBB/para/roadmap/ukb_add1_merge_brain.csv'
X_iterate = zip([brain_dmri_fa, brain_dmri_icvf, brain_dmri_isovf, brain_dmri_l1,
brain_dmri_l2, brain_dmri_l3, brain_dmri_md, brain_dmri_mo,
brain_dmri_od, brain_smri_plus, fluid_intelligence, neuroticism,
education, primary_demographics, lifestyle, mental_health, earlylife],
['fa', 'icvf', 'isovf', 'l1', 'l2', 'l3', 'md', 'mo', 'od',
'smri', 'Fluid \n intelligence', 'Neuroticism', 'Edu', 'Age',
'Lifestyle', 'MH', 'EL'])
columns = []
for i in X_iterate:
columns.extend(i[0].keys())
columns.extend(['eid'])
ukbb = pd.read_csv(path_to_csv, usecols=['20016-2.0', 'eid', '20127-0.0'])
y = ukbb['20016-2.0'].dropna()
new_ukbb = pd.DataFrame(ukbb, index=y.index)
new_ukbb = new_ukbb.drop(columns=['20016-2.0'], errors='ignore')
# Random splitting of data to train our model
X_train, X_test, y_train, y_test = train_test_split(
new_ukbb, y, test_size=0.5, random_state=0)
y_train = X_train['20127-0.0'].dropna()
y_test = X_test['20127-0.0'].dropna()
X_train = pd.DataFrame(X_train, index=y_train.index)
X_test = pd.DataFrame(X_test, index=y_test.index)
import pandas as pd
import numpy as np
from web.pickle_helper import *
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from django.http import HttpResponse
import io
from io import BytesIO
import random
import base64
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
def get_null_count(df):
return df[df.isnull().any(axis=1)].shape[0]
def get_zero_count(df):
return (df == 0).sum().sum()
def get_row_count(df):
return df.shape[0]
def get_column_count(df):
return df.shape[1]
def get_duplicated_count(df):
return df.duplicated().sum()
def get_dtype_count(df):
temp_df = df.dtypes.to_frame()
temp_df.index.names = ['Column Name']
temp_df.columns = ['Type']
return temp_df.to_html()
def get_dtypes_as_dict(df):
return df.dtypes.apply(lambda x: x.name).to_dict()
def get_null_rows_as_html(pk):
df = get_or_save_dataframe(pk)
return df[pd.isnull(df).any(axis=1)].to_html()
def get_zero_rows_as_html(pk):
df = get_or_save_dataframe(pk)
return df.loc[~(df==0).all(axis=1)].to_html()
def get_column_as_html(pk, column_name):
df = get_or_save_dataframe(pk)
return df[[column_name]].to_html()
def get_df_column_as_html(pk, count=200):
df = get_or_save_dataframe(pk)
return df.head(count).to_html()
def get_df_as_html(df, count=200):
return df.head(count).to_html(classes='preview-table table table-striped table-bordered display compact nowrap')
def change_column_name(df, old_column, new_column):
df.rename(columns={old_column: new_column}, inplace=True)
#lets update pickle
update_dataframe(df)
return df
def drop_column(df, column_name):
df.drop(column_name, axis=1, inplace=True)
#lets update pickle
update_dataframe(df)
return df
def get_dummy_value_list(df, column_name):
return df[column_name].value_counts().to_frame().to_html(classes="table")
def get_categorical_value_list(df, column_name):
    dummy, mapping_index = pd.Series(df[column_name]).factorize()
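    # Illustrative note: factorize() returns (codes, uniques), e.g.
    #   pd.Series(['a', 'b', 'a']).factorize() -> (array([0, 1, 0]), Index(['a', 'b']))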
""" Format data """
from __future__ import division, print_function
import pandas as pd
import numpy as np
import re
from os.path import dirname, join
from copy import deepcopy
import lawstructural.lawstructural.constants as lc
import lawstructural.lawstructural.utils as lu
#TODO: Take out entrant stuff from lawData
class Format(object):
""" Basic class for formatting dataset """
def __init__(self):
self.dpath = join(dirname(dirname(__file__)), 'data')
self.data_sets = self.data_imports()
self.data = self.data_sets['usn']
self.ent_data = pd.DataFrame([])
@staticmethod
def _col_fix(col):
""" Fix column strings to be R-readable as well and to be consistent
with older datasets. Think about changing name through rest of program
instead.
"""
col = re.sub('%', 'Percent', col)
col = re.sub('[() -/]', '', col)
if col[0].isdigit():
col = re.sub('thpercentile', '', col)
col = 'p' + col
if col == 'Name':
col = 'school'
if col == 'Issueyear':
col = 'year'
if col == 'Overallrank':
col = 'OverallRank'
return col
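        # Illustrative mapping (hypothetical raw USN header, not verbatim):
        #   _col_fix('25th percentile LSAT (full-time)') -> 'p25LSATfulltime'
        # '%' becomes 'Percent'; spaces, parens, hyphens and slashes are stripped;
        # leading-digit percentile columns drop 'thpercentile' and gain a 'p' prefix.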
@staticmethod
def _fix_bad_values(data):
""" Fix known USN data typos """
data.loc[(data['school'] == 'University of Miami') &
(data['year'] == 2000), 'Tuitionandfeesfulltime'] = 21000
data.loc[(data['school'] == 'Emory University') &
(data['year'] == 2006), 'Employmentrateatgraduation'] = 72.4
data.loc[(data['school'] == 'Northwestern University') &
(data['year'] == 2006),
'EmploymentRate9MonthsafterGraduation'] = 99.5
data.loc[(data['school'] == 'Michigan State University') &
(data['year'] == 2001), 'BarpassageRateinJurisdiction'] = 75
data.loc[(data['school'] == 'Mississippi College') &
(data['year'] == 2001), 'BarpassageRateinJurisdiction'] = 80
return data
def usn_format(self):
""" Basic USN import and format """
#data = pd.read_csv(join(self.dpath, 'usn2015.csv'))
data = pd.read_csv(join(self.dpath, 'Law1988-2015.csv'))
data = data[['Name', 'Value', 'Metric description', 'Issue year']]
data = pd.pivot_table(data, values='Value',
index=['Name', 'Issue year'],
columns='Metric description')
data = data.reset_index()
names = data.columns.tolist()
data.columns = [self._col_fix(el) for el in names]
data = self._fix_bad_values(data)
data = data.sort(['school', 'year'])
data['year'] = data['year'].astype(int)
return data
def cpi_format(self):
""" Basic CPI import and format """
data = pd.read_csv(join(self.dpath, 'lawCPI.csv'))
# Make up for reporting vs data year in USNews and BLS
data['year'] = data['year'] + 2
data = data[data['year'] <= 2015]
data = data.reset_index(drop=True)
return data
@staticmethod
def _id_name_fix(col):
""" Fix outdated names of schools from id dataset """
#FIXME: Find out why this doesn't work for Drexel, Cath U
old = ['Phoenix School of Law',
'Chapman University',
'Drexel University (Mack)',
'Indiana University--Indianapolis',
'Texas Wesleyan University',
'Catholic University of America (Columbus)',
'John Marshall Law School']
new = ['Arizona Summit Law School',
'Chapman University (Fowler)',
'Drexel University',
'Indiana University--Indianapolis (McKinney)',
'Texas A&M University',
'The Catholic University of America',
'The John Marshall Law School']
for i in xrange(len(old)):
col = re.sub(old[i], new[i], col)
return col
def id_format(self):
""" Import LSAC id's. Note that Irvine doesn't have an id. """
data = pd.read_csv(join(self.dpath, 'USNewsNameStateID.csv'))
data['name'] = [self._id_name_fix(col) for col in data['name']]
return data
def elec_format(self):
""" Import yearly electricity prices """
data = pd.read_csv(join(self.dpath, 'lawElectricity.csv'))
states = pd.read_csv(join(self.dpath, 'lawStateAbbr.csv'))
# Change state names to abbreviations
data = pd.merge(data, states)
data = data.drop('state', 1)
columns = data.columns.tolist()
index = columns.index('abbr')
columns[index] = 'state'
data.columns = columns
data['year'] = data['year'] + 2
return data
def data_imports(self):
""" Import dictionary of initially formatted datasets
Datasets are as follows with corresponding sources/scrapers
usn
---
- Data: US News and World Report
- Source: ai.usnews.com
cpi
---
- Data: CPI data from BLS
- Source: http://data.bls.gov/cgi-bin/dsrv?cu
Series Id: CUUR0000SA0,CUUS0000SA0
Not Seasonally Adjusted
Area: U.S. city average
Item: All items
Base Period: 1982-84=100
Years: 1986 to 2015
- Used to be data.bls.gov/timeseries/LNS14000000
wage
----
- Data: Market wages for lawyers from BLS
- Source: bls.gov/oes
states
------
- Data: US News name/state combinations
- Source: US News Top Law Schools
- Scraper: StateScraper.py
id
--
- Data: US News names and LSAC ID combinations
- Source: http://www.lsac.org/lsacresources/publications/
official-guide-archives
- Scraper: NameScraperLSAC.py
entrants
--------
- Data: School entrants, with id's and dates
- Source: http://www.americanbar.org/groups/legal_education/
resources/aba_approved_law_schools/in_alphabetical_order.html
via
http://en.wikipedia.org/
wiki/List_of_law_schools_in_the_United_States
- Scraper: entryscraper.py
electricity
-----------
- Data: State/Country level electricity prices
- Source: eia.gov/electricity/monthly/backissues.html
- Scraper: ElecScraper.py
Returns
-------
data_sets: dict; data sets from specified sources
"""
data_sets = {
'usn': self.usn_format(),
'cpi': self.cpi_format(),
'states': pd.read_csv(join(self.dpath, 'lawNameState.csv')),
'id': self.id_format(),
'entrants': pd.read_csv(join(self.dpath, 'lawEntrants.csv')),
'electricity': self.elec_format(),
'stateregions': pd.read_csv(join(self.dpath, 'StateRegions.csv')),
'aaup_comp_region': pd.read_csv(join(self.dpath,
'aaup_comp_region.csv')),
'aaup_comp': pd.read_csv(join(self.dpath, 'aaup_comp.csv')),
'aaup_salary_region': pd.read_csv(join(self.dpath,
'aaup_salary_region.csv')),
'aaup_salary': pd.read_csv(join(self.dpath, 'aaup_salary.csv'))
}
return data_sets
def fill_ranks(self):
""" Generate top/bottom/inside/squared rank variables,
fill in unranked schools
"""
# Indicate top/bottom ranked schools
self.data['TopRanked'] = 1 * (self.data['OverallRank'] == 1)
self.data['BottomRanked'] = 1 * (self.data['OverallRank'] ==
np.nanmax(self.data['OverallRank']))
self.data['InsideRanked'] = 1 * ((self.data['OverallRank'] > 1) &
(self.data['OverallRank'] <
np.nanmax(self.data['OverallRank'])))
# Squared rank
self.data['OverallRankSquared'] = self.data['OverallRank']**2
# Fill in un-ranked schools as max(rank) + 1 or lc.UNRANKED
mask = pd.isnull(self.data['OverallRank'])
#unranked = np.nanmax(self.data['OverallRank']) + 1
unranked = lc.UNRANKED
self.data['OverallRank'][mask] = unranked
self.data['Ranked'] = 1 * (self.data['OverallRank'] != unranked)
def combine_tuition(self):
""" Combine Full-time and Out-of-State Tuitions """
self.data['Tuition'] = self.data['Tuitionandfeesfulltime']
self.data['Tuition'] = self.data['Tuition'].fillna(
value=self.data['Outofstatetuitionandfeesfulltime']
)
def lags(self):
""" Generate various lags (including tuition alignment) """
lag_vars = ['OverallRank', 'Ranked']
lag_vars = [lag_vars, lu.reaction_spec('full')[0]]
lag_vars.append(lag_vars[1])
lag_vars[2] = [el + '_comp' for el in lag_vars[2]]
lag_vars = [el for sublist in lag_vars for el in sublist]
for lvar in lag_vars:
self.data[lvar + 'L'] = self.data.groupby('school').apply(
pd.DataFrame.shift)[lvar]
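        # e.g. a school ranked [10, 12, 11] in consecutive years gets
        # OverallRankL = [NaN, 10, 12]: each row carries the prior year's value.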
def add_entrants(self):
""" Add indicators for when schools entered """
self.data_sets['entrants']['Founded'] = \
self.data_sets['entrants']['Founded'] + 2
self.data['entry'] = 0
zipped = zip(self.data_sets['entrants']['Founded'],
self.data_sets['entrants']['SchoolUS'])
for enter in zipped:
self.data.loc[(self.data['school'] == enter[1]) &
(self.data['year'] == enter[0]), 'entry'] = 1
def combine_dsets(self):
""" Add in other members of self.data_sets to self.data """
# Location and id
self.data['id'] = 0
for name in self.data['school'].unique():
self.data.ix[self.data['school'] == name, 'state'] = \
self.data_sets['states']['place'].where(
self.data_sets['states']['name'] == name).max()
self.data.ix[self.data['school'] == name, 'id'] = \
self.data_sets['id']['id'].where(
self.data_sets['id']['name'] == name).max()
# Electricity
self.data = pd.merge(self.data, self.data_sets['electricity'],
how='outer', sort=True)
# CPI
self.data = pd.merge(self.data, self.data_sets['cpi'])
# Regions
self.data = pd.merge(self.data, self.data_sets['stateregions'])
# AAUP data sets
aaup_dsets = ['aaup_salary_region',
'aaup_salary']
for dset in aaup_dsets:
self.data = pd.merge(self.data, self.data_sets[dset], how='outer')
def price_adjust(self):
""" Convert nominal to real prices (base year is 2000 - remember,
using USNews dating convention at this point, will be adjusted later)
and change some to thousands
"""
dollar_vars = [
'Tuition', 'p25privatesectorstartingsalary',
'p75privatesectorstartingsalary',
'Averageindebtednessofgraduateswhoincurredlawschooldebt',
'Medianpublicservicestartingsalary',
'Roomboardbooksmiscellaneousexpenses'
]
for dvar in dollar_vars:
self.data[dvar] = self.data[dvar] / 1000.0
dollar_vars.append('p_elec_state')
dollar_vars.append('p_elec_us')
base = self.data_sets['cpi'].loc[
self.data_sets['cpi']['year'] == 2002, 'cpi']
base = base.reset_index(drop=True)[0]
for dvar in dollar_vars:
self.data[dvar + '_nominal'] = self.data[dvar]
self.data[dvar] = self.data[dvar] * (base / self.data['cpi'])
def competition(self):
""" Generate averages in competition sets """
# Generate competition variables
reac_vars = lu.reaction_spec('full')[0]
comp_vars = ['OverallRank']
comp_vars = [reac_vars, comp_vars]
comp_vars = [el for sublist in comp_vars for el in sublist]
comp_vars_comp = [el + '_comp' for el in comp_vars]
comp_add = pd.DataFrame(
np.zeros((self.data.shape[0], len(comp_vars_comp))),
columns=comp_vars_comp)
self.data = self.data.join(comp_add)
for year in self.data['year'].unique():
for cvar in comp_vars:
mask = (1 - np.isnan(self.data[cvar])).astype(bool)
mdata = deepcopy(self.data[mask])
comp_mat = lu.comp_mat_gen(mdata.loc[
mdata['year'] == year, 'OverallRank'])
mdata.loc[mdata['year'] == year, cvar + '_comp'] = \
np.dot(comp_mat, mdata.loc[mdata['year'] == year, cvar])
self.data[mask] = mdata
def treatment(self):
""" Generate treatment variables """
self.data['treat'] = 1 * (self.data['year'] >= 2012)
self.data['RankTreat'] = self.data['OverallRank'] * self.data['treat']
self.data['RankTreat_comp'] = (self.data['OverallRank_comp'] *
self.data['treat'])
self.data['GPATreat'] = (self.data['UndergraduatemedianGPA'] *
self.data['treat'])
self.data['LSATTreat'] = (self.data['MedianLSAT'] *
self.data['treat'])
self.data['TuitionTreat'] = (self.data['Tuition'] *
self.data['treat'])
def ratios(self):
""" Generate transparency ratios """
emp = EmploymentData(self.data, self.dpath)
emp.id_extend()
emp.ratio_gen()
self.data = emp.data_merge()
# Currently next line drops Drexel, Catholic U of Amer., and UC Irvine
self.data = self.data[pd.notnull(self.data['id'])]
self.data = self.data[pd.notnull(self.data['school'])]
for name in np.unique(self.data['school']):
# Find earliest year with calculated ratio, and reverse impute
selection = self.data.loc[
self.data['school'] == name,
['year', 'ratio']
].reset_index(drop=True)
year = np.min(
selection['year'][np.where(selection['ratio'].notnull())[0]]
)
self.data.loc[
(self.data['school'] == name) &
(self.data['year'] < year),
'ratio'
] = self.data.loc[
(self.data['school'] == name) &
(self.data['year'] == year),
'ratio'
].reset_index(drop=True)[0]
def dummies(self):
""" Generate state dummies """
dummies = pd.get_dummies(self.data['state'])
self.data = self.data.join(dummies)
self.data = self.data.reset_index(drop=True)
def var_name_change(self):
""" Change names of variables to match previous data dump """
previous = [
'p25LSATfulltime',
'MedianLSATfulltime', 'p75LSATfulltime',
'p25undergraduateGPAfulltime',
'MedianundergraduateGPAfulltime',
'p75undergraduateGPAfulltime', 'Enrollmentfulltime'
]
new = [
'p25LSATScore',
'MedianLSAT', 'p75LSATScore',
'p25UndergraduateGPA',
'UndergraduatemedianGPA',
'p75UndergraduateGPA', 'FreshmanEnrolled'
]
columns = self.data.columns.tolist()
for i in xrange(len(previous)):
columns = [re.sub(previous[i], new[i], el) for el in columns]
self.data.columns = columns
def format(self):
""" Driver function """
print(" * Formatting primary dataset.")
self.var_name_change()
self.fill_ranks()
self.combine_dsets()
self.combine_tuition()
self.price_adjust()
self.competition()
self.lags()
self.treatment()
#FIXME: Find out why ratios not keeping Atlanta's John Marshall
self.ratios()
self.dummies()
self.data['const'] = 1
self.data['year'] = self.data['year'] - 2
self.data = self.data.sort(['school', 'year']).reset_index(drop=True)
self.data = self.data.drop_duplicates(subset=['school', 'year'])
n_applicants_format(self.dpath)
def entrance_format(self):
""" Generate dataset for entrance estimates """
print(" * Formatting entrance dataset.")
data = self.data[[
'year', 'Numberofmatriculantsfulltime', 'Tuition'
]]
data['Revenue'] = (data['Numberofmatriculantsfulltime'] *
data['Tuition'])
grouped = data.groupby('year')
self.ent_data = pd.DataFrame(grouped.mean())
data = pd.read_csv(join(self.dpath, 'lawEntrants.csv'))
#pylint: disable=maybe-no-member
data = data.loc[data['Founded'] >= min(self.data['year']), 'Founded']
summed = pd.DataFrame(data.value_counts(), columns=['entry'])
summed = summed.sort().reset_index()
summed.columns = ['year', 'entry']
for year in xrange(int(min(self.data['year'])),
int(max(self.data['year']))+1):
if year not in summed.year.values:
summed = summed.append({'year': year, 'entry': 0},
ignore_index=True)
summed = summed.sort('year').reset_index(drop=True)
self.ent_data = pd.merge(self.ent_data.reset_index(),
summed.reset_index())
diffed = self.ent_data.set_index('year').diff()
diffed.columns = [el + '_d' for el in diffed.columns]
self.ent_data['treat'] = 1 * (self.ent_data['year'] >= 2010)
self.ent_data = pd.merge(self.ent_data, diffed.reset_index())
def data_out(self):
""" Save dataset as .csv file """
self.data.to_csv(join(self.dpath, 'lawData.csv'), encoding='utf-8',
index=False)
self.ent_data.to_csv(join(self.dpath, 'entData.csv'), index=False)
def lsn_format(self):
""" Format Law School Numbers dataset. Must be run AFTER data_out """
print(" * Formatting Law School Numbers dataset.")
data = pd.read_csv(join(self.dpath, 'lawschoolnumbersSCHOOLS.csv'))
ranks = pd.read_csv(join(self.dpath, 'lawData.csv'))
lsn = LawSchoolNumbers(self.dpath, data, ranks)
lsn.gen()
lsn.save()
def n_applicants_format(dpath):
""" Format end-of-year ABA Volume summary for yearly number of
applicants
Source: http://www.lsac.org/lsacresources/data/lsac-volume-summary
"""
print(" * Formatting ABA year-end summary")
data = pd.read_excel(join(dpath, 'eoy-spreadsheet.xlsx'), index_col=None)
data = data.ix[1, :].reset_index()
data.columns = ['year', 'n_apps']
data['year'] = data['year'].str.replace(r'\D+', '').astype('int')
def sub_format(arg):
""" Sub-function to clean out unicode """
try:
return int(re.sub(',', '', re.sub(u'\xa0', '', arg)))
except TypeError:
return arg
data['n_apps'] = data['n_apps'].map(sub_format)
data.to_csv(join(dpath, 'abaeoy.csv'))
class EmploymentData(object):
""" Construct and merge transparency ratios. Note that each year in raw
data corresponds to the previous year's graduating class. Example: 2011
data was reported in 2011 about the 2010 graduating class associated with
the 2009 entering class. This is adjusted by one year with respect to the
"master data" set to correspond to students using last year's placement to
make decisions for the current year. This is equivalent to Ranking being
based on previous year's school attributes.
"""
def __init__(self, master_data, path):
self.max_year = 2014
self.master_data = master_data
self.data = self.data_import(path)
self.weights = self.weight_gen()
self.ratios = None
@staticmethod
def col_filter(col):
""" Filter column names from 2011 data """
col = re.sub('university', 'SchoolName', col)
col = re.sub('Emp Solo', 'Solo', col)
col = re.sub('Emp 501', '501-PLUS', col)
col = re.sub('Emp Type Unk', 'Unknown', col)
col = re.sub('Emp ', '', col)
col = re.sub('\+', '', col)
col = col.strip()
return col
def data_import(self, path):
""" Import employment datasets, format 2011 columns to math other
years, and export as dictionary
Parameters
----------
path: string, directory with data files
"""
data = {}
for year in xrange(2011, (self.max_year + 1)):
dpath = join(path, 'EmploymentSummary-' + str(year) + '.csv')
data[str(year)] = pd.read_csv(dpath)
data['2011'].columns = [
self.col_filter(col) for col in data['2011'].columns
]
return data
@staticmethod
def weight_gen():
""" Generate weights for different employment types. Weights taken
from expected salaries for new law graduates by firm size reported at
http://www.nalp.org/new_associate_sal_oct2011
Returns
-------
dictionary: types of employment with corresponding weights
"""
types = ['Solo', '2-10', '11-25', '26-50', '51-100', '101-250',
'251-500', '501-PLUS', 'Unknown']
salaries = np.array([73, 73, 73, 86, 91, 110, 130, 130, 0])
salaries = salaries / salaries[-2]
return dict(zip(types, salaries))
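        # Resulting weights, for reference: each salary divided by the 251-500
        # bucket (130), so 'Solo'/'2-10'/'11-25' -> ~0.56, '26-50' -> ~0.66,
        # '51-100' -> 0.70, '101-250' -> ~0.85, '251-500'/'501-PLUS' -> 1.0,
        # 'Unknown' -> 0.0.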
def id_extend(self):
""" Extend LSAC ids to all schools to facilitate merger with US News
dataset. Previously only associated with 2011-2012.
"""
idvars = ['SchoolName', 'id']
nameid = pd.concat([self.data['2011'][idvars],
self.data['2012'][idvars]])
nameid = nameid.drop_duplicates()
for name in nameid['SchoolName']:
for year in xrange(2013, (self.max_year + 1)):
self.data[str(year)].loc[
self.data[str(year)]['SchoolName'] == name, 'id'
] = nameid.loc[nameid['SchoolName'] == name, 'id'].tolist()[0]
def ratio_gen(self):
""" Generate transparency ratios """
ratios = []
for year in xrange(2011, (self.max_year + 1)):
data_weight = deepcopy(self.data[str(year)][self.weights.keys()])
denominator = data_weight.sum(1)
for column in data_weight.columns:
data_weight[column] = (data_weight[column] *
self.weights[column])
numerator = data_weight.sum(1)
ratio = numerator / denominator
ratio = pd.DataFrame({
'id': self.data[str(year)]['id'],
'year': year,
'ratio': ratio
})
ratios.append(ratio)
self.ratios = pd.concat(ratios, ignore_index=True)
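        # Worked example (hypothetical school): 10 graduates in '501-PLUS' and
        # 10 in 'Unknown' gives ratio = (10 * 1.0 + 10 * 0.0) / 20 = 0.5.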
def data_merge(self):
""" Merge ratios with master dataset and return """
self.ratios['year'] = self.ratios['year'] + 1
data_out = pd.merge(self.master_data, self.ratios, on=['id', 'year'],
how='outer')
return data_out
class LawSchoolNumbers(object):
""" Primary class. Keeps updated dataset as attribute """
def __init__(self, dpath, data, ranks):
self.dpath = dpath
self.data = data
self.ranks = ranks[['school', 'year', 'OverallRank', 'Tuition']]
initial_vars = ['year', 'user', 'state', 'LSAT', 'LSDAS_GPA',
'Degree_GPA']
self.school_names = np.array(self.data.columns)
self.school_names = self.get_school_names(initial_vars)
self.data = self.data.loc[
~(data[self.school_names] == 0).all(axis=1)
].reset_index(drop=True)
self.new_data = deepcopy(self.data[initial_vars])
def get_school_names(self, initial_vars):
""" Isolate school names from column names in data """
self.school_names = np.delete(self.school_names, 0)
original_indices = self.school_names.argsort()
remove = original_indices[
np.searchsorted(self.school_names[original_indices],
initial_vars)
]
return np.delete(self.school_names, remove)
def get_not_applied(self, year):
""" For a given year, return boolean mask of schools not applied to.
Returned mask is a pandas dataframe object.
"""
not_applied = (
self.data.loc[self.data.year == year, self.school_names] == 0
).reset_index(drop=True)
return not_applied
def get_not_admitted(self, year):
""" For a given year, return boolean mask of schools not admitting
applicant. Returned mask is a pandas dataframe object.
"""
not_admitted = (
self.data.loc[self.data.year == year, self.school_names] < 4
).reset_index(drop=True)
return not_admitted
def get_not_matric(self, year):
""" For a given year, return boolean mask of schools not matriculating
applicant. Returned mask is a pandas dataframe object.
"""
not_matric = (
self.data.loc[self.data.year == year, self.school_names] != 5
).reset_index(drop=True)
return not_matric
def max_applied(self, rank, data_schools, year):
""" Select maximum (worst) rank of applied-to schools """
data_applied = deepcopy(data_schools)
not_applied = self.get_not_applied(year)
data_applied[not_applied] = 0
worst_schools = np.array(data_applied.idxmax(1))
self.new_data.loc[self.new_data['year'] == year, 'app_worst'] = \
np.array(
rank.loc[worst_schools, 'OverallRank']
)
def min_applied(self, rank, data_schools, year):
""" Select minimum (best) rank of applied-to schools"""
data_applied = deepcopy(data_schools)
not_applied = self.get_not_applied(year)
data_applied[not_applied] = 500
best_schools = np.array(data_applied.idxmin(1))
self.new_data.loc[self.new_data['year'] == year, 'app_best'] = \
np.array(
rank.loc[best_schools, 'OverallRank']
)
def max_admitted(self, rank, data_schools, year):
""" Select maximum (worst) rank of admitted-to schools """
data_admitted = deepcopy(data_schools)
not_admitted = self.get_not_admitted(year)
data_admitted[not_admitted] = np.nan
worst_schools = np.array(data_admitted.idxmax(1))
        was_admitted = ~pd.isnull(worst_schools)  # mask of applicants admitted somewhere this year
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = | pd.Index([1., 1., 2., 3., 4.]) | pandas.Index |
import numpy as np
import py2neo
import pandas as pd
import networkx as nx
from scipy import sparse
DATA_DIR = "data/mag"
def get_db():
username = "neo4j"
password = "<PASSWORD>"
uri = "http://localhost:7474"
graph = py2neo.Graph(uri=uri, user=username, password=password)
return graph
def construct_adjacency_matrix(nodes, edges):
    # Compute the mapping from original node ids to contiguous matrix indices (0..N-1)
max_node_id = np.max(nodes)
node2id = -np.ones(max_node_id + 1)
node2id[nodes] = np.arange(nodes.size)
max_edge_id = np.max(edges[:, :2])
# Remove edges that do not exist in nodes list
edges = edges[(np.max(edges[:, :2], axis=1) <= max_node_id), :]
edges[:, :2] = node2id[edges[:, :2].reshape(-1)].reshape((edges.shape[0], 2))
# Remove edges that do not exist in nodes list again
edges = edges[(np.min(edges[:, :2], axis=1) >= 0), :]
# Adjacency matrix
N = len(nodes)
A = sparse.csr_matrix((edges[:, 2], (edges[:, 0], edges[:, 1])), shape=(N, N))
return A
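# A minimal usage sketch (the _demo_* helper below is illustrative only): a toy
# graph with non-contiguous node ids shows how construct_adjacency_matrix remaps
# ids to rows/columns 0..N-1 while keeping the edge weights.
def _demo_construct_adjacency_matrix():
    toy_nodes = np.array([3, 7, 9])
    toy_edges = np.array([[3, 7, 1], [7, 9, 2]])  # columns: source, target, weight
    A = construct_adjacency_matrix(toy_nodes, toy_edges)
    return A.toarray()  # 3x3 array with A[0, 1] == 1 and A[1, 2] == 2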
def load_network(years, net_data_dir=None):
if hasattr(years, "__len__") == False:
years = [years]
if net_data_dir is None:
net_data_dir = "%s/networks/" % DATA_DIR
# Load the node and edge files
df_nodes = []
df_edges = []
df_raw_edges = []
for year in years:
node_file = "{root}/nodes-{year}.csv".format(
root=net_data_dir, year=year
)
edge_file = "{root}/edges-{year}.csv".format(
root=net_data_dir, year=year
)
raw_edge_file = "{root}/raw-edges-{year}.csv".format(
root=net_data_dir, year=year
)
_df_nodes = pd.read_csv(node_file, sep="\t")
_df_edges = pd.read_csv(edge_file, sep="\t")
_df_raw_edges = pd.read_csv(raw_edge_file, sep="\t")
df_nodes += [_df_nodes]
df_edges += [_df_edges]
df_raw_edges += [_df_raw_edges]
df_nodes = pd.concat(df_nodes, ignore_index=True)
df_edges = pd.concat(df_edges, ignore_index=True)
df_raw_edges = pd.concat(df_raw_edges, ignore_index=True)
# Nodes
nodes = np.unique(df_edges[["source", "target"]].values.reshape(-1))
# Edges
edges = df_edges[["source", "target", "w"]].values
raw_edges = df_raw_edges[["source", "target", "w"]].values
# Construct networks
A = construct_adjacency_matrix(nodes, edges)
Araw = construct_adjacency_matrix(nodes, raw_edges)
return A, Araw, nodes
def neo4jid2mag_journalid(neo4jids):
graph = get_db()
query = """
match (j:Journal)
where ID(j) in [{neo4jids}]
return ID(j) as neo4jid, j.JournalId as journal_id
""".format(neo4jids = ",".join(["%d" % x for x in neo4jids]))
df = graph.run(query).to_data_frame()
return df.set_index("neo4jid").loc[neo4jids,"journal_id"].values
def to_networkx_graph(A, nodes, create_using=nx.DiGraph):
"""
Parameters
----------
A : scipy sparse matrix
Adjacency matrix
nodes : list or numpy array
List of node names in order of nodes in A
Returns
------
G : networkx.Graph
"""
G = nx.from_scipy_sparse_matrix(A, create_using=create_using)
return nx.relabel_nodes(G, dict(zip(np.arange(nodes.size), nodes)))
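# A minimal usage sketch (illustrative only; assumes a networkx version that
# still provides from_scipy_sparse_matrix, as used above): matrix indices are
# relabelled back to the original node ids, so edge (0, 1) in A becomes (3, 7).
def _demo_to_networkx_graph():
    nodes = np.array([3, 7])
    A = sparse.csr_matrix(np.array([[0, 1], [0, 0]]))
    G = to_networkx_graph(A, nodes)
    return list(G.edges())  # [(3, 7)]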
def load_detected_cartels(years, cartel_dir):
cartel_table_list = []
group_id_offset = 0
for year in years:
cartel_table = pd.read_csv(
"{root}/cartels-{year}.csv".format(root=cartel_dir, year=year), sep="\t"
)
cartel_table["year"] = year
#cartel_table["group_id"] += group_id_offset
cartel_table["gross_group_id"] = cartel_table["group_id"]
cartel_table["gross_group_id"] += group_id_offset
group_id_offset = np.max(cartel_table["gross_group_id"].values) + 1
cartel_table_list += [cartel_table]
cartel_table = | pd.concat(cartel_table_list, ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from src.policies.single_policy_functions import (
_identify_who_attends_because_of_a_b_schooling,
)
from src.policies.single_policy_functions import mixed_educ_policy
@pytest.fixture
def fake_states():
states = pd.DataFrame(index=np.arange(10))
states["state"] = ["Bayern", "Berlin"] * 5
# date at which schools are open in Berlin but closed in Bavaria
# date with uneven week number, i.e. where group a attends school
states["date"] = pd.Timestamp("2020-04-23")
states["educ_a_b_identifier"] = [False, True] * 5
states["occupation"] = pd.Categorical(
["school"] * 8 + ["preschool_teacher", "school_teacher"]
)
states["school_group_id_0"] = [-1] + [22] * 9
states["educ_worker"] = [False] * 8 + [True] * 2
states["age"] = np.arange(10)
return states
@pytest.fixture
def contacts(fake_states):
return pd.Series(True, index=fake_states.index)
def test_a_b_school_system_above_age_0(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school'",
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series([False, True] * 4 + [True, True])
pd.testing.assert_series_equal(calculated, expected)
def test_a_b_school_system_above_age_5(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school' & age > 5",
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series([True] * 6 + [False] + [True] * 3)
pd.testing.assert_series_equal(calculated, expected)
def test_a_b_school_system_below_age_5(fake_states, contacts):
calculated = mixed_educ_policy(
states=fake_states,
contacts=contacts,
seed=123,
group_id_column="school_group_id_0",
a_b_query="occupation == 'school' & age < 5",
non_a_b_attend=False,
hygiene_multiplier=1.0,
always_attend_query="state == 'Niedersachsen'", # no one
params=None,
)
expected = pd.Series(
[False, True, False, True, False, False, False, False, True, True]
)
pd.testing.assert_series_equal(calculated, expected)
def test_mixed_educ_policy_others_home_no_hygiene():
states = pd.DataFrame()
states["county"] = [1, 1, 2, 2, 2, 2, 2, 2]
states["educ_worker"] = [True, False, True, False, False, False, False, False]
states["school_group_id_0"] = [11, 11, 22, 22, 22, 22, 22, -1]
states["educ_a_b_identifier"] = [False, True, True, True, False, True, False, True]
states["date"] = pd.Timestamp("2021-01-04") # week 1
contacts = pd.Series([True] * 6 + [False] * 2, index=states.index)
seed = 333
res = mixed_educ_policy(
states=states,
contacts=contacts,
seed=seed,
group_id_column="school_group_id_0",
a_b_query="county == 2",
non_a_b_attend=False,
hygiene_multiplier=1.0,
always_attend_query="county == 55", # no one
params=None,
)
# zero class, closed county, teacher despite wrong week,
# wrong week, right week, wrong week, right week but not attending, not in school
expected = pd.Series([True, False, True, True, False, True, False, False])
pd.testing.assert_series_equal(res, expected)
def test_mixed_educ_policy_no_contacts():
states = pd.DataFrame()
states["educ_worker"] = [True, False, True, False, False, False, False]
states["educ_a_b_identifier"] = [False, True, False, True, False, True, False]
states["school_group_id_0"] = [11, 11, 22, 22, 22, 22, -1]
states["date"] = pd.Timestamp("2021-01-04") # week 1
states["county"] = 33
contacts = pd.Series(False, index=states.index)
seed = 333
res = mixed_educ_policy(
states=states,
contacts=contacts,
seed=seed,
group_id_column="school_group_id_0",
a_b_query=True,
non_a_b_attend=True,
hygiene_multiplier=1.0,
always_attend_query="county == 55", # no one
params=None,
)
pd.testing.assert_series_equal(res, contacts)
def test_identify_who_attends_because_of_a_b_schooling():
states = pd.DataFrame()
states["county"] = [1, 1, 2, 2, 2]
states["educ_a_b_identifier"] = [False, True, False, True, False]
states["date"] = pd.Timestamp("2021-01-04") # week number 1
# wrong county, wrong county, wrong week, right week, wrong week
expected = pd.Series([False, False, False, True, False])
a_b_query = "county == 2"
res = _identify_who_attends_because_of_a_b_schooling(
states,
a_b_query=a_b_query,
a_b_rhythm="weekly",
)
pd.testing.assert_series_equal(res, expected)
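# A minimal sketch of the ISO week numbers these fixtures rely on (assumption:
# the weekly A/B rhythm keys off the ISO calendar week of states["date"]).
def _demo_iso_week_parity():
    assert pd.Timestamp("2020-04-23").isocalendar()[1] == 17  # odd week
    assert pd.Timestamp("2021-01-04").isocalendar()[1] == 1   # odd week
    assert pd.Timestamp("2021-01-12").isocalendar()[1] == 2   # even week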
def test_identify_who_attends_because_of_a_b_schooling_daily():
states = pd.DataFrame()
states["educ_a_b_identifier"] = [False, True, False, True, False]
states["county"] = 2
states["date"] = pd.Timestamp("2021-01-05")
expected = states["educ_a_b_identifier"].astype(bool)
a_b_query = "county == 2"
res = _identify_who_attends_because_of_a_b_schooling(
states=states,
a_b_query=a_b_query,
a_b_rhythm="daily",
)
pd.testing.assert_series_equal(res, expected, check_names=False)
def test_identify_who_attends_because_of_a_b_schooling_daily2():
states = pd.DataFrame()
states["educ_worker"] = [False, False, True, False, False]
states["county"] = 2
states["educ_a_b_identifier"] = [False, True, False, True, False]
states["date"] = pd.Timestamp("2021-01-12")
expected = ~states["educ_a_b_identifier"].astype(bool)
a_b_query = "county == 2"
res = _identify_who_attends_because_of_a_b_schooling(
states=states,
a_b_query=a_b_query,
a_b_rhythm="daily",
)
pd.testing.assert_series_equal(res, expected, check_names=False)
def test_emergency_care():
states = pd.DataFrame()
states["educ_worker"] = [True, False, True, False, False]
states["always_attend"] = [False, False, True, True, False]
states["school_group_id_0"] = [1, 1, 2, 2, -1]
contacts = pd.Series([True, True, True, True, False], index=states.index)
res = mixed_educ_policy(
contacts=contacts,
states=states,
seed=333,
group_id_column="school_group_id_0",
always_attend_query="always_attend",
a_b_query=False,
non_a_b_attend=False,
hygiene_multiplier=1.0,
params=None,
)
# educ_worker, child not in emergency care, educ_worker with class,
# attends, outside educ system
expected = pd.Series([True, False, True, True, False])
| pd.testing.assert_series_equal(res, expected) | pandas.testing.assert_series_equal |
import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class EmpPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='emp_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop['age_cat'] = ''
pop.loc[pop['age'].isin(list(range(0, 5))), ['age_cat']] = '00_04'
pop.loc[pop['age'].isin(list(range(5, 10))), ['age_cat']] = '05_09'
pop.loc[pop['age'].isin(list(range(10, 15))), ['age_cat']] = '10_14'
pop.loc[pop['age'].isin(list(range(15, 18))), ['age_cat']] = '15_17'
pop.loc[pop['age'].isin(list(range(18, 20))), ['age_cat']] = '18_19'
pop.loc[pop['age'].isin(list(range(20, 21))), ['age_cat']] = '20_20'
pop.loc[pop['age'].isin(list(range(21, 22))), ['age_cat']] = '21_21'
pop.loc[pop['age'].isin(list(range(22, 25))), ['age_cat']] = '22_24'
pop.loc[pop['age'].isin(list(range(25, 30))), ['age_cat']] = '25_29'
pop.loc[pop['age'].isin(list(range(30, 35))), ['age_cat']] = '30_34'
pop.loc[pop['age'].isin(list(range(35, 40))), ['age_cat']] = '35_39'
pop.loc[pop['age'].isin(list(range(40, 45))), ['age_cat']] = '40_44'
pop.loc[pop['age'].isin(list(range(45, 50))), ['age_cat']] = '45_49'
pop.loc[pop['age'].isin(list(range(50, 55))), ['age_cat']] = '50_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 62))), ['age_cat']] = '60_61'
pop.loc[pop['age'].isin(list(range(62, 65))), ['age_cat']] = '62_64'
pop.loc[pop['age'].isin(list(range(65, 67))), ['age_cat']] = '65_66'
pop.loc[pop['age'].isin(list(range(67, 70))), ['age_cat']] = '67_69'
pop.loc[pop['age'].isin(list(range(70, 75))), ['age_cat']] = '70_74'
pop.loc[pop['age'].isin(list(range(75, 80))), ['age_cat']] = '75_79'
pop.loc[pop['age'].isin(list(range(80, 85))), ['age_cat']] = '80_84'
pop.loc[pop['age'].isin(list(range(85, 103))), ['age_cat']] = '85_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat'], pop['sex'], pop['race_ethn']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
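# A minimal sketch (illustrative only) of an equivalent, more compact age
# binning via pd.cut; the bin edges and labels simply restate the categories
# assigned above with the chain of .loc calls.
def _demo_age_binning_with_cut():
    ages = pd.Series([3, 16, 21, 45, 88])
    bins = [0, 5, 10, 15, 18, 20, 21, 22, 25, 30, 35, 40, 45, 50, 55,
            60, 62, 65, 67, 70, 75, 80, 85, 103]
    labels = ['00_04', '05_09', '10_14', '15_17', '18_19', '20_20', '21_21',
              '22_24', '25_29', '30_34', '35_39', '40_44', '45_49', '50_54',
              '55_59', '60_61', '62_64', '65_66', '67_69', '70_74', '75_79',
              '80_84', '85_99']
    return pd.cut(ages, bins=bins, labels=labels, right=False)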
class MilPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = pd.read_hdf('temp/data.h5', 'dem_sim_rates')
in_query = getattr(sql, 'inc_mil_gc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pop_mil.loc[pop_mil['mildep'] == 'Y']
pop = pop.join(pop_mil)
pop.rename(columns={'persons': 'mil_gc_pop'}, inplace=True)
pop.rename(columns={'mil_mildep': 'mil_hh_pop'}, inplace=True)
pop = pop.reset_index(drop=False)
pop = pd.DataFrame(pop[['mil_gc_pop', 'mil_hh_pop']].groupby([pop['yr']]).sum())
pop.to_hdf('temp/data.h5', 'mil_pop', mode='a')
class LaborForceParticipationRates(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
lfpr = extract.create_df('lfp_rates', 'lfp_rates_table', rate_id=econ_sim_rates.lfpr_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
lfpr.to_hdf('temp/data.h5', 'lfpr', mode='a')
class LaborForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForceParticipationRates(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
pop = pd.read_hdf('temp/data.h5', 'pop')
lfpr = pd.read_hdf('temp/data.h5', 'lfpr')
labor_force = pop.join(lfpr)
labor_force['labor_force'] = (labor_force['persons'] * labor_force['lfpr']).round()
labor_force = labor_force.iloc[~labor_force.index.get_level_values('age_cat').isin(['00_04', '05_09', '10_14'])]
labor_force.to_hdf('temp/data.h5', 'labor_force', mode='a')
class CohortUrRate(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
cohort_ur = extract.create_df('cohort_ur', 'cohort_ur_table', rate_id=econ_sim_rates.ur1_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
cohort_ur.to_hdf('temp/data.h5', 'cohort_ur', mode='a')
yearly_ur = extract.create_df('yearly_ur', 'yearly_ur_table', rate_id=econ_sim_rates.ur2_id[0], index=['yr'])
yearly_ur.to_hdf('temp/data.h5', 'yearly_ur', mode='a')
class WorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return CohortUrRate(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
labor_force = pd.read_hdf('temp/data.h5', 'labor_force')
cohort_ur = pd.read_hdf('temp/data.h5', 'cohort_ur')
yearly_ur = pd.read_hdf('temp/data.h5', 'yearly_ur')
work_force = labor_force.join(cohort_ur)
work_force['unemployed'] = (work_force['labor_force'] * work_force['ur2']).round()
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
computed_ur = computed_ur.join(yearly_ur)
computed_ur['adjustment'] = (computed_ur['ur1'] / computed_ur['computed_ur'])
work_force = work_force.join(computed_ur['adjustment'])
work_force['unemployed'] = (work_force['unemployed'] * work_force['adjustment']).round()
work_force['work_force'] = (work_force['labor_force'] - work_force['unemployed'])
work_force.to_hdf('temp/data.h5', 'work_force', mode='a')
# Code to check if after adjustment ur matches target
'''
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
        print(computed_ur)
'''
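# A minimal numeric sketch (toy figures, not model inputs) of the adjustment in
# WorkForce.run: cohort-level unemployment is rescaled so the aggregate rate
# matches the yearly target ur1.
def _demo_ur_adjustment():
    labor_force = pd.Series([100.0, 300.0])
    cohort_ur = pd.Series([0.10, 0.05])
    unemployed = labor_force * cohort_ur                # 10 and 15
    computed_ur = unemployed.sum() / labor_force.sum()  # 25 / 400 = 0.0625
    target_ur = 0.05                                    # yearly ur1
    adjustment = target_ur / computed_ur                # 0.8
    return unemployed * adjustment                      # 8 and 12 -> 20 / 400 = 0.05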
class LocalWorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return WorkForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
out_commuting = extract.create_df('out_commuting', 'out_commuting_table', rate_id=econ_sim_rates.oc_id[0], index=['yr'])
work_force = pd.read_hdf('temp/data.h5', 'work_force')
work_force = work_force.reset_index(drop=False)
work_force = pd.DataFrame(work_force[['labor_force', 'unemployed', 'work_force']].groupby([work_force['yr']]).sum())
work_force = work_force.join(out_commuting)
work_force['work_force_outside'] = (work_force['work_force'] * work_force['wtlh_lh']).round()
work_force['work_force_local'] = (work_force['work_force'] - work_force['work_force_outside']).round()
work_force.to_hdf('temp/data.h5', 'work_force_local', mode='a')
class Jobs(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LocalWorkForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
local_jobs = extract.create_df('local_jobs', 'local_jobs_table', rate_id=econ_sim_rates.lj_id[0], index=['yr'])
in_commuting = extract.create_df('in_commuting', 'in_commuting_table',rate_id=econ_sim_rates.ic_id[0], index=['yr'])
work_force_local = pd.read_hdf('temp/data.h5', 'work_force_local')
work_force_local = work_force_local.join(local_jobs)
work_force_local['jobs_local'] = (work_force_local['work_force_local'] * work_force_local['jlw']).round()
work_force_local = work_force_local.join(in_commuting)
work_force_local['jobs_total'] = (work_force_local['jobs_local'] * work_force_local['wh_whlh']).round()
work_force_local['jobs_external'] = (work_force_local['jobs_total'] - work_force_local['jobs_local']).round()
# pull information from here
work_force_local.to_hdf('temp/data.h5', 'jobs', mode='a')
class SectoralPay(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return Jobs(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
sectoral_share = extract.create_df('sectoral_share', 'sectoral_share_table', rate_id=econ_sim_rates.ss_id[0], index=['yr', 'sandag_sector'])
sectoral_pay = extract.create_df('sectoral_pay', 'sectoral_pay_table', rate_id=econ_sim_rates.sp_id[0], index=['yr', 'sandag_sector'])
jobs = pd.read_hdf('temp/data.h5', 'jobs')
jobs = jobs[['jobs_total']]
jobs = jobs.join(sectoral_share, how='right')
jobs['sector_jobs'] = (jobs['jobs_total'] * jobs['share']).round()
jobs = jobs.drop(['jobs_total'], 1)
jobs = jobs.join(sectoral_pay)
jobs['tot_ann_job_pay'] = (jobs['sector_jobs'] * jobs['annual_pay']).round()
jobs.to_hdf('temp/data.h5', 'sectoral', mode='a')
run_table = pd.read_hdf('temp/data.h5', 'run_id')
run_id = run_table[0]
jobs['run_id'] = run_id
jobs.to_sql(name='sectors', con=engine, schema='defm', if_exists='append', index=True)
class MilPay(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return MilPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = | pd.read_hdf('temp/data.h5', 'econ_sim_rates') | pandas.read_hdf |
"""
Module contains tools for processing files into DataFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas._libs.parsers as parsers
from pandas._libs.parsers import STR_NA_VALUES
from pandas._libs.tslibs import parsing
from pandas._typing import FilePathOrBuffer, StorageOptions, Union
from pandas.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, generic
from pandas.core.arrays import Categorical
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from pandas.core.series import Series
from pandas.core.tools import datetimes as tools
from pandas.io.common import IOHandles, get_handle, validate_header_arg
from pandas.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{summary}
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatically detect
the separator, but the Python parsing engine can, meaning the latter will
be used and automatically detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header=None``. Explicitly pass ``header=0`` to be able to
replace existing names. The header can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass ``header=0`` to override the column names.
Duplicates in this list are not allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``DataFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force pandas to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a DataFrame from ``data`` with element order preserved use
``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If callable, the callable function will be evaluated against the column
names, returning names where the callable function evaluates to True. An
example of a valid callable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
prefix : str, optional
Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` together with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If callable, the callable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``pd.to_datetime`` after
``pd.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partially-applied
:func:`pandas.to_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatted dates.
infer_datetime_format : bool, default False
If True and `parse_dates` is enabled, pandas will attempt to infer the
format of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM format dates, international and European format.
cache_dates : bool, default True
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or getting chunks with
``get_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
for more information on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
lineterminator : str (length 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (length 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogether. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
treated as the header.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more details.
error_bad_lines : bool, default True
Lines with too many fields (e.g. a csv line with too many commas) will by
default cause an exception to be raised, and no DataFrame will be returned.
    If False, then these "bad lines" will be dropped from the DataFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Internally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single DataFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_map : bool, default False
If a filepath is provided for `filepath_or_buffer`, map the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer any I/O overhead.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision pandas converter, and
'round_trip' for the round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
DataFrame or TextParser
    A comma-separated values (csv) file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> pd.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return val
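# A minimal sketch (illustrative helper only): whole-number floats are cast to
# int, None passes through, and anything else raises ValueError.
def _demo_validate_integer():
    assert validate_integer("nrows", 3.0) == 3
    assert validate_integer("nrows", None) is None
    try:
        validate_integer("chunksize", 0, min_val=1)
    except ValueError:
        pass  # 0 is below the required minimum of 1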
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Raises
------
ValueError
If names are not unique or are not ordered (e.g. set).
"""
if names is not None:
if len(names) != len(set(names)):
raise ValueError("Duplicate names are not allowed.")
if not (
is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.get("date_parser", None) is not None:
if isinstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get("iterator", False)
chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1)
nrows = kwds.get("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"lineterminator": None,
"header": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_format": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_map": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.format(
func_name="read_csv",
summary="Read a comma-separated values (csv) file into DataFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
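# A minimal usage sketch (illustrative helper only): any file-like object with
# a read() method is accepted, so an in-memory StringIO buffer behaves like a
# file path.
def _demo_read_csv_from_buffer():
    buffer = StringIO("a,b,c\n1,2,3\n4,5,6\n")
    return read_csv(buffer, dtype={"c": np.float64})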
@Appender(
_doc_read_csv_and_table.format(
func_name="read_table",
summary="Read general delimited file into DataFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_map=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.update(kwds_defaults)
return _read(filepath_or_buffer, kwds)
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
        A comma-separated values (csv) file is returned as a two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
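# A minimal usage sketch (illustrative helper only): 'widths' is converted
# above into contiguous 'colspecs', so these two calls parse the same buffer
# identically.
def _demo_read_fwf_widths():
    data = "20200101  1.5\n20200102 10.0\n"
    by_widths = read_fwf(StringIO(data), widths=[8, 5], names=["date", "value"])
    by_colspecs = read_fwf(
        StringIO(data), colspecs=[(0, 8), (8, 13)], names=["date", "value"]
    )
    return by_widths, by_colspecs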
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides any of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.get("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _merge_with_dialect_properties(dialect, kwds)
if kwds.get("header", "infer") == "infer":
kwds["header"] = 0 if kwds.get("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._get_options_with_defaults(engine)
options["storage_options"] = kwds.get("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _get_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.get(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.get(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.get(argname, default)
options[argname] = value
if engine == "python-fwf":
# pandas\io\parsers.py:907: error: Incompatible types in assignment
# (expression has type "object", variable has type "Union[int, str,
# None]") [assignment]
for argname, default in _fwf_defaults.items(): # type: ignore[assignment]
options[argname] = kwds.get(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly calls
# "__next__(...)" when iterating through such an object, meaning it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.copy()
fallback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
fallback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
fallback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and len(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
fallback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.getfilesystemencoding() or "utf-8"
try:
if len(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
fallback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and isinstance(quotechar, (str, bytes)):
if (
len(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
fallback_reason = (
"ord(quotechar) > 127, meaning the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if fallback_reason and self._engine_specified:
raise ValueError(fallback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if fallback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Falling back to the 'python' engine because "
f"{fallback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if fallback_reason:
warnings.warn(
(
"Falling back to the 'python' engine because "
f"{fallback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_arg(options["header"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.get(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not isinstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not isinstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is internally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not callable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.get_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mapping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mapping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mapping.keys()})"
)
# error: Too many arguments for "ParserBase"
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actually fine:
new_rows = len(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = len(index)
df = DataFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and len(df.columns) == 1:
return df[df.columns[0]].copy()
return df
def get_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
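# Illustrative sketch (not part of pandas itself): the chunked reading that
# read()/get_chunk() above implement, driven through the public read_csv API;
# chunksize returns a TextFileReader usable as a context manager.
def _example_chunked_read_csv():
    import io
    import pandas as pd
    buf = io.StringIO("a,b\n1,2\n3,4\n5,6\n")
    with pd.read_csv(buf, chunksize=2) as reader:
        return [len(chunk) for chunk in reader]  # -> [2, 1]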
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or isinstance(index_col, bool):
index_col = []
return (
len(columns)
and not isinstance(columns, MultiIndex)
and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
)
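# Illustrative sketch (not part of pandas itself): tuple column labels make a
# header a MultiIndex candidate, plain string labels do not.
def _example_is_potential_multi_index():
    assert _is_potential_multi_index([("a", "x"), ("a", "y")])
    assert not _is_potential_multi_index(["a", "b"])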
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
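# Illustrative sketch (not part of pandas itself): a callable usecols is mapped
# to the set of matching column positions, anything else passes through as-is.
def _example_evaluate_usecols():
    assert _evaluate_usecols(lambda name: name.startswith("x"), ["x1", "y", "x2"]) == {0, 2}
    assert _evaluate_usecols(["a", "b"], ["a", "b", "c"]) == ["a", "b"]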
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
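# Illustrative sketch (not part of pandas itself): requesting a column that is
# absent from the parsed names raises immediately.
def _example_validate_usecols_names():
    assert _validate_usecols_names(["a"], ["a", "b"]) == ["a"]
    try:
        _validate_usecols_names(["a", "z"], ["a", "b"])  # "z" missing -> ValueError
    except ValueError:
        pass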
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable."
)
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
            # Ensure it is an iterable container but not a string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
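# Illustrative sketch (not part of pandas itself): list-likes come back as a set
# plus their inferred dtype; callables and None pass through untouched.
def _example_validate_usecols_arg():
    assert _validate_usecols_arg(["a", "b"]) == ({"a", "b"}, "string")
    assert _validate_usecols_arg([0, 2]) == ({0, 2}, "integer")
    assert _validate_usecols_arg(None) == (None, None)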
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
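# Illustrative sketch (not part of pandas itself): booleans, lists and dicts are
# accepted for parse_dates, any other scalar is rejected.
def _example_validate_parse_dates_arg():
    assert _validate_parse_dates_arg(True) is True
    assert _validate_parse_dates_arg(["date"]) == ["date"]
    try:
        _validate_parse_dates_arg("date")  # non-boolean scalar -> TypeError
    except TypeError:
        pass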
class ParserBase:
def __init__(self, kwds):
self.names = kwds.get("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.get("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.get("na_values")
self.na_fvalues = kwds.get("na_fvalues")
self.na_filter = kwds.get("na_filter", False)
self.keep_default_na = kwds.get("keep_default_na", True)
self.true_values = kwds.get("true_values")
self.false_values = kwds.get("false_values")
self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True)
self.infer_datetime_format = kwds.pop("infer_datetime_format", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_format=self.infer_datetime_format,
cache_dates=self.cache_dates,
)
# validate header options for mi
self.header = kwds.get("header")
if isinstance(self.header, (list, tuple, np.ndarray)):
if not all(map(is_integer, self.header)):
raise ValueError("header must be integer or list of integers")
if any(i < 0 for i in self.header):
raise ValueError(
"cannot specify multi-index header with negative integers"
)
if kwds.get("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header"
)
if kwds.get("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and all(map(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header"
)
elif self.header is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header is not None"
)
# GH 16338
elif not is_integer(self.header):
raise ValueError("header must be integer or list of integers")
# GH 27779
elif self.header < 0:
raise ValueError(
"Passing negative integer to header is invalid. "
"For no header, use header=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
        Let the readers open IOHandles after they are done with their potential raises.
"""
self.handles = get_handle(
src,
"r",
encoding=kwds.get("encoding", None),
compression=kwds.get("compression", None),
memory_map=kwds.get("memory_map", False),
storage_options=kwds.get("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Raises
------
ValueError
If column to parse_date is not in dataframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# get only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if isinstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return isinstance(self.parse_dates, dict) or (
isinstance(self.parse_dates, list)
and len(self.parse_dates) > 0
and isinstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers
"""
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header)))
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header = ",".join(str(x) for x in self.header)
raise ParserError(
f"Passed header=[{header}] are too many rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header
]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# pandas\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
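    # Example of the behaviour above: with mangle_dupe_cols=True,
    # ["a", "b", "a"] is deduplicated to ["a", "b", "a.1"]; tuple names for a
    # potential MultiIndex get the ".N" suffix appended to their last level.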
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._get_simple_index(alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._get_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = len(indexnamerow) - len(columns)
# pandas\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _get_simple_index(self, data, columns):
def ix(col):
if not isinstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.append(i)
index.append(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _get_complex_date_index(self, data, col_names):
def _get_name(icol):
if isinstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _get_name(idx)
to_remove.append(name)
index.append(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if isinstance(self.na_values, dict):
# pandas\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _get_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.get(c, None)
if isinstance(dtypes, dict):
cast_type = dtypes.get(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _get_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = algorithms.isin(values, list(na_values)).view(np.uint8)
values = | lib.map_infer_mask(values, conv_f, mask) | pandas._libs.lib.map_infer_mask |
"""
This module handles data and provides convenient and efficient access to it.
"""
from __future__ import annotations
import os
import pickle
import sys
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from scipy import sparse
import util.tamer as tamer
from util import utils
from util.constants import *
from util.groups import Groups
from util.utils import GitUtil, SearchOptions, Settings
log = utils.get_logger(__name__)
settings = Settings.get_settings()
class DataHandler:
manuscripts: pd.DataFrame
"""Manuscripts
A dataframe containing all manuscripts with their respective metadata.
The dataframe will have the following structure:
    Per row, there will be metadata for one manuscript. The row indices are integers 0..n.
The dataframe contains the following columns:
- 'shelfmark'
- 'shorttitle'
- 'country'
- 'settlement'
- 'repository'
- 'origin'
- 'date'
- 'Terminus post quem'
- 'Terminus ante quem'
- 'meandate'
- 'yearrange'
- 'support'
- 'folio'
- 'height'
- 'width'
- 'extent'
- 'description'
- 'creator'
- 'id'
- 'full_id'
- 'filename'
"""
person_names: Dict[str, str]
"""Name lookup dictionary
Lookup dictionary mapping person IDs to the full name of the person
"""
person_names_inverse: Dict[str, List[str]]
"""Inverted name lookup dictionary
Dictionary mapping person names to a list of IDs of persons with said name"""
text_matrix: pd.DataFrame
"""Text-Manuscript-Matrix
Sparse matrix with a row per manuscript and a column per text name.
True, if the manuscript contains the text.
Allows for lookups, which manuscripts a particular text is connected to.
""" # TODO: Document the type of ID used for MSs in index/row label
person_matrix: pd.DataFrame
"""Person-Manuscript-Matrix
Sparse matrix with a row per manuscript and a column per person ID.
True, if the manuscript is connected to the person (i.e. the description has the person tagged).
Allows for lookups, which manuscripts a particular person is connected to.
"""
groups: Groups
# CHORE: document
def __init__(self) -> None:
"""DataHandler constructor.
Returns a new instance of a DataHandler.
Should not be called directly, but rather through the factory method `DataHandler.get_handler()`.
"""
log.info("Creating new handler")
self.person_names, self.person_names_inverse = DataHandler._load_persons()
log.info("Loaded Person Info")
self.manuscripts = DataHandler._load_ms_info(persons=self.person_names)
log.info("Loaded MS Info")
self.text_matrix = DataHandler._load_text_matrix(self.manuscripts)
log.info("Loaded Text Info")
self.person_matrix = DataHandler._load_person_matrix(self.manuscripts)
log.info("Loaded Person-MSS-Matrix Info")
self.groups = Groups.from_cache() or Groups()
log.debug(f"Groups loaded: {self.groups}")
self.manuscripts.drop(columns=["content", "soup"], inplace=True)
log.info("Successfully created a Datahandler instance.")
GitUtil.update_handler_state()
# Static Methods
# ==============
@staticmethod
def _from_pickle() -> Optional[DataHandler]:
"""Load datahandler from pickle, if available. Returns None otherwise."""
if os.path.exists(HANDLER_PATH_PICKLE):
try:
prev = sys.getrecursionlimit()
with open(HANDLER_PATH_PICKLE, mode='rb') as file:
sys.setrecursionlimit(prev * 100)
obj = pickle.load(file)
sys.setrecursionlimit(prev)
if isinstance(obj, DataHandler):
obj.groups = Groups.from_cache() or Groups()
log.debug(f"Groups loaded: {obj.groups}")
return obj
except Exception:
log.exception("Cound not load handler from pickle")
return None
@staticmethod
def _load_ms_info(persons: Dict[str, str]) -> pd.DataFrame:
"""Load manuscript metadata"""
df = tamer.deliver_handler_data()
df['soup'] = df['content'].apply(lambda x: BeautifulSoup(x, 'xml', from_encoding='utf-8'))
msinfo = df['soup'].apply(lambda x: tamer.get_msinfo(x, persons))
log.info("Loaded MS Info")
df = df.join(msinfo)
return df
@staticmethod
def _load_text_matrix(df: pd.DataFrame) -> pd.DataFrame:
"""Load the text-manuscript-matrix"""
mss_ids, text_names, coords = tamer.get_text_mss_matrix_coordinatres(df)
r, c = map(list, zip(*coords))
row = np.array(r)
col = np.array(c)
data = np.array([True]*len(row))
matrix = sparse.coo_matrix((data, (row, col)))
df = | pd.DataFrame.sparse.from_spmatrix(matrix, index=mss_ids, columns=text_names) | pandas.DataFrame.sparse.from_spmatrix |
""" pandaspyomo: read data from coopr.pyomo models to pandas DataFrames
Pyomo is a GAMS-like model description language for mathematical
optimization problems. This module provides functions to read data from
Pyomo model instances and result objects. Use list_entities to get a list
of all entities (sets, params, variables, objectives or constraints) inside a
pyomo instance, before getting its contents with get_entity (or get_entities).
Usage:
import pandaspyomo as pdpo
pdpo.list_entities(instance, 'var')
[('EprOut', ['time', 'process', 'commodity', 'commodity']), ...
('EprIn', ['time', 'process', 'commodity', 'commodity'])]
epr = pdpo.get_entities(instance, ['EprOut', 'EprInt'])
...
"""
import coopr.pyomo as pyomo
import pandas as pd
def get_entity(instance, name):
""" Return a DataFrame for an entity in model instance.
Args:
instance: a Pyomo ConcreteModel instance
name: name of a Set, Param, Var, Constraint or Objective
Returns:
a single-columned Pandas DataFrame with domain as index
"""
# retrieve entity, its type and its onset names
entity = instance.__getattribute__(name)
labels = _get_onset_names(entity)
# extract values
if isinstance(entity, pyomo.Set):
# Pyomo sets don't have values, only elements
results = pd.DataFrame([(v, 1) for v in entity.value])
# for unconstrained sets, the column label is identical to their index
# hence, make index equal to entity name and append underscore to name
# (=the later column title) to preserve identical index names for both
# unconstrained supersets
if not labels:
labels = [name]
name = name+'_'
elif isinstance(entity, pyomo.Param):
if entity.dim() > 1:
results = pd.DataFrame([v[0]+(v[1],) for v in entity.iteritems()])
else:
results = pd.DataFrame(entity.iteritems())
else:
# create DataFrame
if entity.dim() > 1:
# concatenate index tuples with value if entity has
# multidimensional indices v[0]
results = pd.DataFrame(
[v[0]+(v[1].value,) for v in entity.iteritems()])
else:
# otherwise, create tuple from scalar index v[0]
results = pd.DataFrame(
[(v[0], v[1].value) for v in entity.iteritems()])
# check for duplicate onset names and append one to several "_" to make
# them unique, e.g. ['sit', 'sit', 'com'] becomes ['sit', 'sit_', 'com']
for k, label in enumerate(labels):
if label in labels[:k]:
labels[k] = labels[k] + "_"
if not results.empty:
# name columns according to labels + entity name
results.columns = labels + [name]
results.set_index(labels, inplace=True)
return results
def get_entities(instance, names):
""" Return one DataFrame with entities in columns and a common index.
Works only on entities that share a common domain (set or set_tuple), which
is used as index of the returned DataFrame.
Args:
instance: a Pyomo ConcreteModel instance
names: list of entity names (as returned by list_entities)
Returns:
a Pandas DataFrame with entities as columns and domains as index
"""
df = pd.DataFrame()
for name in names:
other = get_entity(instance, name)
if df.empty:
df = other
else:
index_names_before = df.index.names
df = df.join(other, how='outer')
if index_names_before != df.index.names:
df.index.names = index_names_before
return df
def list_entities(instance, entity_type):
""" Return list of sets, params, variables, constraints or objectives
Args:
instance: a Pyomo ConcreteModel object
entity_type: "set", "par", "var", "con" or "obj"
Returns:
DataFrame of entities
Example:
>>> data = read_excel('mimo-example.xlsx')
>>> model = create_model(data, range(1,25))
>>> list_entities(model, 'obj') #doctest: +NORMALIZE_WHITESPACE
Description Domain
Name
obj minimize(cost = sum of all cost types) []
"""
# helper function to discern entities by type
def filter_by_type(entity, entity_type):
if entity_type == 'set':
return isinstance(entity, pyomo.Set) and not entity.virtual
elif entity_type == 'par':
return isinstance(entity, pyomo.Param)
elif entity_type == 'var':
return isinstance(entity, pyomo.Var)
elif entity_type == 'con':
return isinstance(entity, pyomo.Constraint)
elif entity_type == 'obj':
return isinstance(entity, pyomo.Objective)
else:
raise ValueError("Unknown entity_type '{}'".format(entity_type))
    # iterate through all model components and keep only those matching entity_type
iter_entities = instance.__dict__.iteritems()
entities = sorted(
(name, entity.doc, _get_onset_names(entity))
for (name, entity) in iter_entities
if filter_by_type(entity, entity_type))
# if something was found, wrap tuples in DataFrame, otherwise return empty
if entities:
entities = pd.DataFrame(entities,
columns=['Name', 'Description', 'Domain'])
entities.set_index('Name', inplace=True)
else:
entities = | pd.DataFrame() | pandas.DataFrame |
import re
import pandas as pd
import numpy as np
from datasets.constants import signal_types
from datasets.sources.source_base import SourceBase
import logging
logger = logging.getLogger(__name__)
class EverionSource(SourceBase):
FILES = {
'signals': r'^CsvData_signals_EV-[A-Z0-9-]{14}\.csv$',
'sensors': r'^CsvData_sensor_data_EV-[A-Z0-9-]{14}\.csv$',
'features': r'^CsvData_features_EV-[A-Z0-9-]{14}\.csv$',
# 'aggregates': r'^CsvData_aggregates_EV-[A-Z0-9-]{14}\.csv$',
# 'analytics': r'^CsvData_analytics_events_EV-[A-Z0-9-]{14}\.csv$',
# 'events': r'^CsvData_everion_events_EV-[A-Z0-9-]{14}\.csv$',
}
META = {
'inter_pulse_interval': {
'type': signal_types.RR_INTERVAL,
'unit': 'Milliseconds'
},
'heart_rate': {
'unit': 'BPM'
},
'heart_rate_variability': {
'unit': 'Milliseconds'
},
'gsr_electrode': {
'unit': 'Microsiemens'
},
'ctemp': {
'unit': 'Celsius'
},
'temperature_object': {
'unit': 'Celsius'
},
'temperature_barometer': {
'unit': 'Celsius'
},
'temperature_local': {
'unit': 'Celsius'
},
'barometer_pressure': {
'unit': 'Millibar'
},
'respiration_rate': {
'unit': 'BPM'
},
'oxygen_saturation': {
'unit': 'Percent'
},
}
SIGNAL_TAGS = {
6: ['heart_rate'],
7: ['oxygen_saturation'],
#8: ['perfusion_index'],
#9: ['motion_activity'],
#10: ['activity_classification'],
11: ['heart_rate_variability', 'heart_rate_variability_quality'],
12: ['respiration_rate'],
#13: ['energy'],
15: ['ctemp'],
19: ['temperature_local'],
20: ['barometer_pressure'],
21: ['gsr_electrode'],
#22: ['health_score'],
#23: ['relax_stress_intensity_score'],
#24: ['sleep_quality_index_score'],
#25: ['training_effect_score'],
#26: ['activity_score'],
#66: ['richness_score'],
#68: ['heart_rate_quality'],
#69: ['oxygen_saturation_quality'],
#70: ['blood_pulse_wave', 'blood_pulse_wave_quality'],
#71: ['number_of_steps'],
#72: ['activity_classification_quality'],
#73: ['energy_quality'],
#74: ['heart_rate_variability_quality'],
#75: ['respiration_rate_quality'],
#76: ['ctemp_quality'],
118: ['temperature_object'],
119: ['temperature_barometer'],
#133: ['perfusion_index_quality'],
#134: ['blood_pulse_wave_quality']
}
SENSOR_TAGS = {
80: ['led1_data'],
81: ['led2_data'],
82: ['led3_data'],
83: ['led4_data'],
84: ['accx_data'],
85: ['accy_data'],
86: ['accz_data'],
#88: ['led2_current'],
#89: ['led3_current'],
#90: ['led4_current'],
#91: ['current_offset'],
#92: ['compressed_data']
}
FEATURE_TAGS = {
14: ['inter_pulse_interval', 'inter_pulse_interval_deviation'],
#17: ['pis'],
#18: ['pid'],
#77: ['inter_pulse_deviation'],
#78: ['pis_quality'],
#79: ['pid_quality']
}
@classmethod
def name(cls):
return "Biovotion Everion"
@classmethod
def fileOptions(cls):
return [
{
'label': 'Signals Data',
'pattern': '^CsvData_signals_EV-[A-Z0-9-]{14}\.csv$',
'required': True,
'multiple': False,
'timestamp': False
},
{
'label': 'Sensor Data',
'pattern': '^CsvData_sensor_data_EV-[A-Z0-9-]{14}\.csv$',
'required': False,
'multiple': False,
'timestamp': False
},
{
'label': 'Features Data',
'pattern': '^CsvData_features_EV-[A-Z0-9-]{14}\.csv$',
'required': False,
'multiple': False,
'timestamp': False
},
]
@staticmethod
def extend_values(df, dtype='float64'):
values_extended = df['values'].str.extract(r'(?P<value>[\d.]+);?(?P<value2>[\d.]+)?') \
.astype({ 'value': dtype, 'value2': dtype }, copy=False)
df_extended = pd.concat([df, values_extended], axis=1)
df_extended.drop(columns='values', inplace=True)
return df_extended
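    # Illustrative example (not part of the original source): a 'values' column
    # of ["61.0;100.0", "37.2"] becomes value=[61.0, 37.2] and
    # value2=[100.0, NaN] after extend_values().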
@staticmethod
def get_dataframe_iterator(path, cols=['count', 'tag', 'time', 'values']):
parse_dates = ['time'] if 'time' in cols else None
return pd.read_csv(
path,
usecols=cols,
dtype={
'count': 'uint32',
'streamType': 'int8',
'tag': 'int8',
'values': 'object'
},
parse_dates=parse_dates,
date_parser=lambda x: pd.to_datetime(x, unit='s', utc=True),
chunksize=100000
)
@staticmethod
def split_data(df, predicate):
df = df.copy()
df_split = []
split_at = df[predicate(df)]['count'].unique()
for index, count in enumerate(split_at):
selected = df['count'] <= count
if index > 0:
selected = selected & (df['count'] > split_at[index - 1])
df_split.append(df[selected])
# If it was splitted append last segment, else whole dataframe
if split_at.size == 0:
df_split.append(df)
else:
df_split.append(df[df['count'] > split_at[-1]])
assert np.sum([len(part) for part in df_split]) == len(df)
return [part for part in df_split if not part.empty]
@classmethod
def create_time_lookup_for_ibi(cls, path, max_deviation=15, threshold=600):
df = pd.DataFrame()
df_iterator = cls.get_dataframe_iterator(path, ['tag', 'count', 'time', 'values'])
# append data from csv in chunks and drop duplicates
for chunk in df_iterator:
chunk.drop_duplicates(subset=['count', 'tag'], inplace=True)
chunk = chunk[chunk['tag'] == 14]
chunk = cls.extend_values(chunk)
chunk = chunk[chunk['value2'] <= max_deviation]
chunk.drop('value2', axis='columns', inplace=True)
chunk['value'] = chunk['value'].astype('uint16')
df = pd.concat([df, chunk], sort=False)
df.drop_duplicates(subset=['count', 'tag'], inplace=True)
df.sort_values(['tag', 'count'], inplace=True)
df.reset_index(drop=True, inplace=True)
        # split dataframes into consecutive parts
df_split = cls.split_data(
df,
lambda x: x['time'].shift(-1, fill_value=x['time'].max()) - x['time'] - pd.to_timedelta(x['value'].shift(-1, fill_value=0), 'ms') > pd.to_timedelta(x['value'] + threshold, 'ms')
)
# calculate correct time and concatenate split dataframes
df = pd.DataFrame()
for each in df_split:
start_time = each['time'].min()
each['seconds'] = pd.Series(each['value'].cumsum(), dtype='uint32')
each['seconds'] = each['seconds'].shift(1, fill_value=0)
each['seconds'] = pd.to_timedelta(each['seconds'], unit='ms')
each['time'] = each['seconds'] + start_time
each.drop(['seconds'], axis='columns', inplace=True)
df = pd.concat([df, each])
df.reset_index(drop=True, inplace=True)
return df[['tag', 'count', 'time']]
@classmethod
def create_time_lookup(cls, path, tag):
if isinstance(tag, int):
tag = [tag]
elif not isinstance(tag, list):
raise TypeError(f'Expected tag to be int or list, but got {type(tag)}')
includes_interbeat_interval = 14 in tag
if includes_interbeat_interval:
tag = [value for value in tag if value != 14]
df = pd.DataFrame(columns=['tag', 'count', 'time'])
if tag:
df_iterator = cls.get_dataframe_iterator(path, ['tag', 'count', 'time'])
# append data from csv in chunks and drop duplicates
for chunk in df_iterator:
chunk.drop_duplicates(subset=['count', 'tag'], inplace=True)
subset = chunk['tag'].isin(tag)
df = pd.concat([df, chunk[subset]], sort=False)
# drop missed duplicates
df.drop_duplicates(subset=['count', 'tag'], inplace=True)
df.sort_values(['tag', 'count'], inplace=True)
df.reset_index(inplace=True, drop=True)
# calculate number of samples per second precision timestamp
samples_per_ts = df.reset_index().groupby(['tag', 'time']).count()
mean_spt = samples_per_ts.groupby('tag').mean()['count']
mean_spt.rename('mean_spt', inplace=True)
mean_spt = mean_spt.round(1)
high_freq_tags = list(mean_spt[mean_spt > 1].index)
# Calculate timestamps for signals with frequency higher than 1 Hz
if high_freq_tags:
df_split = cls.split_data(
df,
lambda x: x['time'].shift(-1, fill_value=x['time'].max()) - x['time'] > pd.Timedelta(1, 's')
)
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
##################################################
Extract Active Entries from ChEMBL SQLite Database
##################################################
*Created on Tue Feb 02, 2022 by <NAME>*
Extract active molecule entries from the SQLite version of the ChEMBL database.
Copy the result file to oracle-server with:
$ scp chembl_29_active_entries_with_smiles.tsv pahl@oracle-server:/mnt/data/PipelinePilot-data/public/users/pahl/chembl/
"""
import sys
import sqlite3
import pandas as pd
from rdkit.Chem.Scaffolds import MurckoScaffold
if __name__ == "__main__":
query = """
select md.chembl_id, act.standard_type as std_type, act.standard_value as act,
docs.journal, docs.year, docs.volume, docs.issue, docs.first_page, docs.doi,
pfc.l1 as pfc_l1, pfc.l2 as pfc_l2, pfc.l3 as pfc_l3, pfc.l4 as pfc_l4, pfc.l5 as pfc_l5
from activities act, molecule_dictionary md,
assays, target_dictionary td,
component_class cc, target_components tc,
protein_family_classification pfc,
docs
where act.molregno = md.molregno
and assays.doc_id = docs.doc_id
and assays.assay_id = act.assay_id
and td.tid = assays.tid
and tc.tid = assays.tid
and tc.component_id = cc.component_id
and cc.protein_class_id = pfc.protein_class_id
and act.standard_units = 'nM'
and act.standard_type in ('IC50', 'EC50', 'Ki', 'Kd')
and act.standard_relation in ('=', '<', '<=')
and assays.confidence_score = 9
and assays.assay_type = 'B'
and td.target_type = 'SINGLE PROTEIN'
and (act.data_validity_comment is Null or act.data_validity_comment = 'Manually validated');"""
print("Extract active entries from ChEMBL.")
assert len(sys.argv) == 2, "Usage: extract_nps_from_sqlite.py <chembl version>"
VERSION = sys.argv[1]
print(f"Extracting active entries from ChEMBL {VERSION} (SQLite)...")
conn = sqlite3.connect(f"./chembl_{VERSION}.db")
df = pd.read_sql(query, conn)
conn.close()
df.to_csv(f"chembl_{VERSION}_active_entries.tsv", sep="\t", index=False)
# df = pd.read_csv(f"chembl_{VERSION}_active_entries.tsv", sep="\t", low_memory=False)
print(f"{df.shape[0]} active entries extracted.")
print("Merging Smiles from medchem data...")
df_mc = pd.read_csv(f"./chembl_{VERSION}_medchem.tsv", sep="\t")
df = | pd.merge(df, df_mc, how="inner", on="chembl_id") | pandas.merge |
import pandas as pd
import path_utils
from Evolve import Evolve, replot_evo_dict_from_dir
import traceback as tb
import os, json, shutil
import numpy as np
import matplotlib.pyplot as plt
import itertools
from copy import deepcopy
import pprint as pp
from tabulate import tabulate
import seaborn as sns
import shutil
import psutil, time
import ray
'''
This is very similar to Benchmark.py, but that one was designed (when I had a
previous setup in mind) to run a set of parameters MULTIPLE times each. I.e.,
it would create an Evolve object and do evo_obj.evolve() several times to create
a distribution. Now, there's no "time dependence", so we really just want to be
able to look at separate parameter settings, but only running them once each.
I'm also getting rid of the whole "solved" aspect for now because it's based
on numbers that are hard to explain, making it a bit pointless.
run_param_dict() is the most basic function, just doing an evolution for a passed
param_dict. Other functions basically involve calling it given various inputs.
'''
################################ Statistics functions
@path_utils.timer
def run_param_dict(param_dict, N_gen, N_trials, base_dir):
'''
Pass a single params dict to run an evolve() of, including the env_name.
Also pass an output_dir, or it will use the default output folder.
This only runs each setting ONCE.
'''
# deepcopy, just to be safer
params = deepcopy(param_dict)
assert 'env_name' in params.keys(), 'Must supply an env_name!'
env_name = params['env_name']
params.pop('env_name')
params['base_dir'] = base_dir
try:
# Run a single parameters setting
e = Evolve(env_name, **params)
evo_dict = e.evolve(N_gen, N_trials=N_trials, print_gen=True)
e.save_all_evo_stats(evo_dict, save_plots=True)
return evo_dict
except:
print(f'\n\nError in evolve with params: {params}. Traceback:\n')
print(tb.format_exc())
print('\n\nAttempting to continue...\n\n')
return {}
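# Illustrative sketch (not from the original module): a minimal direct call.
# The NN-related keys follow the example in run_vary_params' docstring below
# and may not match Evolve's actual keyword arguments.
def _example_run_param_dict(base_dir):
    params = {'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer', 'N_hidden_units': 4}
    return run_param_dict(params, N_gen=50, N_trials=10, base_dir=base_dir)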
@ray.remote
def run_param_dict_wrapper(param_dict, N_gen, N_trials, base_dir):
# If a run_fname_label is provided, use that to create a more informative dir name.
# Otherwise, just use the date.
if 'run_fname_label' in param_dict.keys():
run_fname_label = param_dict['run_fname_label']
else:
run_fname_label = 'vary_params'
# Make plots for this params set
if 'run_plot_label' in param_dict.keys():
run_plot_label = param_dict['run_plot_label']
else:
run_plot_label = run_fname_label
# Run dir for this set of params
params_dir = os.path.join(base_dir, '{}_{}'.format(run_fname_label, path_utils.get_date_str()))
os.mkdir(params_dir)
# Doing this so it just saves directly to this dir, which has a more
# informative name than Evolve.__init__() would create.
param_dict['run_dir'] = params_dir
print('\n\nNow running with params:')
pp.pprint(param_dict, width=1)
print('\n\n')
stats_dict = run_param_dict(param_dict, N_gen, N_trials, base_dir)
return stats_dict
@path_utils.timer
def run_multi_envs(env_list, **kwargs):
'''
Iterates over a list of env names you give it,
running them and recording info.
'''
N_gen = kwargs.get('N_gen', 1000)
N_trials = kwargs.get('N_trials', 1000)
# Create dir for the results of this stats set.
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Dict to hold results on timing, etc.
stats_dict = {}
for env_name in env_list:
print(f'\nGetting stats for env {env_name} now...\n')
param_dict = deepcopy(kwargs)
param_dict['env_name'] = env_name
stats_dict[env_name] = run_param_dict(param_dict, N_gen, N_trials, stats_dir)
# Save distributions to file
with open(os.path.join(stats_dir, 'multi_env_stats.json'), 'w+') as f:
json.dump(stats_dict, f, indent=4)
def run_classic_control_envs(**kwargs):
'''
Loads gym_envs_info.json. This contains info about the envs we want to analyze.
It then calls run_multi_envs() for the classic control envs.
'''
with open(os.path.join(path_utils.get_src_dir(), 'gym_envs_info.json'), 'r') as f:
envs_dict = json.load(f)
env_list = [k for k,v in envs_dict.items() if v['env_type']=='classic_control']
print(f'Getting stats for: {env_list}')
run_multi_envs(env_list, **kwargs)
def run_param_dict_list(params_dict_list, **kwargs):
'''
Pass this a list of dicts, where each has the different parameters you want
to gather stats for.
It then iterates through this list, doing a run for each dict.
Note that it modifies the passed params_dict_list to add the results to it.
'''
# Create dir for the results of this stats run if one isn't provided.
stats_dir = kwargs.get('stats_dir', None)
if stats_dir is None:
stats_dir = os.path.join(path_utils.get_output_dir(), 'Stats_{}'.format(path_utils.get_date_str()))
os.mkdir(stats_dir)
# Produce results in parallel
for d in params_dict_list:
# For non-ray use
'''d['result'] = run_param_dict_wrapper( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)'''
# For use with ray
d['result_ID'] = run_param_dict_wrapper.remote( d,
kwargs.get('N_gen', 100),
kwargs.get('N_trials', 10),
stats_dir)
# Retrieve results from ID
for d in params_dict_list:
d['stats_dict'] = ray.get(d['result_ID'])
d.pop('result_ID')
#d['stats_dict'] = d['result'] # for non-ray use
#d.pop('result')
# Return passed list, which should have dicts
# modified with the results
return params_dict_list
@path_utils.timer
def run_vary_params(constant_params_dict, vary_params_dict, **kwargs):
'''
This is a convenience function to easily vary parameters for analysis.
You pass it constant_params_dict, which is a dict with the values that
you want to remain constant between runs. Then, pass it vary_params_dict,
which should have each parameter that you want to vary as a list of the values
it should take.
Example:
constant_params_dict = {
'env_name' : 'CartPole-v0',
'N_gen' : 1000,
'N_dist' : 100,
'NN' : 'FFNN_multilayer'
}
vary_params_dict = {
'N_hidden_units' : [2, 4, 8],
'act_fn' : ['tanh', 'relu']
}
This will do 3*2 = 6 runs, for each of the combinations of varying parameters.
'''
# Create informative dir name
vary_params = list(vary_params_dict.keys())
stats_dir = os.path.join(
path_utils.get_output_dir(),
'Stats_vary_{}_{}'.format('_'.join(vary_params), path_utils.get_date_str()))
print(f'\nSaving statistics run to {stats_dir}')
os.mkdir(stats_dir)
# Create runs dir
all_runs_dir = os.path.join(stats_dir, 'all_runs')
print(f'\nSaving all runs to {all_runs_dir}')
os.mkdir(all_runs_dir)
# Create dict of const and vary params, as separate items
all_params = {
'const_params' : constant_params_dict,
'vary_params' : vary_params_dict
}
other_run_params = ['N_gen', 'N_trials']
for p in other_run_params:
if p in kwargs.keys():
all_params[p] = kwargs.get(p, None)
# Save params to file
with open(os.path.join(stats_dir, 'all_params.json'), 'w+') as f:
json.dump(all_params, f, indent=4)
# Flatten list, pass to other function
flat_param_list = vary_params_cross_products(constant_params_dict, vary_params_dict)
flat_param_list = run_param_dict_list(flat_param_list, stats_dir=all_runs_dir, **kwargs)
# Parse results
for d in flat_param_list:
# For now I'll still keep vary_params_stats.csv, but I think it's not
# actually necessary.
# Get rid of this now
d.pop('stats_dict')
# Save results to csv for later parsing/plotting
df = pd.DataFrame(flat_param_list)
print(tabulate(df, headers=df.columns.values, tablefmt='psql'))
df_fname = os.path.join(stats_dir, 'vary_params_stats.csv')
df.to_csv(df_fname, index=False)
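# Illustrative sketch (not from the original module), wiring up the dicts from
# the docstring above; it produces 3*2 = 6 runs in a timestamped stats dir.
def _example_run_vary_params():
    const = {'env_name': 'CartPole-v0', 'NN': 'FFNN_multilayer'}
    vary = {'N_hidden_units': [2, 4, 8], 'act_fn': ['tanh', 'relu']}
    run_vary_params(const, vary, N_gen=1000, N_trials=100)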
################################# Plotting functions
def plot_all_agg_stats(stats_dir):
'''
For plotting all the heatmaps/etc for a stats_dir.
'''
agg_stats_dir = os.path.join(stats_dir, 'agg_stats')
if os.path.exists(agg_stats_dir):
shutil.rmtree(agg_stats_dir)
print(f'\nSaving all aggregate stats to {agg_stats_dir}')
os.mkdir(agg_stats_dir)
all_params_fname = os.path.join(stats_dir, 'all_params.json')
with open(all_params_fname, 'r') as f:
all_params_dict = json.load(f)
# Import all scores
all_scores_fname = os.path.join(stats_dir, 'all_scores.csv')
df = | pd.read_csv(all_scores_fname) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from brightics.common.report import ReportBuilder, strip_margin, pandasDF2MD, plt2MD, dict2MD
from brightics.function.utils import _model_dict
from sklearn.tree.export import export_graphviz
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
def decision_tree_classification_train(table, group_by=None, **params):
check_required_parameters(_decision_tree_classification_train, params, ['table'])
if group_by is not None:
return _function_by_group(_decision_tree_classification_train, table, group_by=group_by, **params)
else:
return _decision_tree_classification_train(table, **params)
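# Illustrative sketch (not part of the original source; the toy frame below is
# made up) of calling the Brightics wrapper directly:
def _example_decision_tree_train():
    df = pd.DataFrame({'x1': [1.0, 2.0, 3.0, 4.0],
                       'x2': [0.5, 0.4, 0.9, 0.8],
                       'label': ['a', 'a', 'b', 'b']})
    return decision_tree_classification_train(df, feature_cols=['x1', 'x2'],
                                               label_col='label', max_depth=2)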
def _decision_tree_classification_train(table, feature_cols, label_col, # fig_size=np.array([6.4, 4.8]),
criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False,
sample_weight=None, check_input=True, X_idx_sorted=None):
classifier = DecisionTreeClassifier(criterion, splitter, max_depth, min_samples_split, min_samples_leaf,
min_weight_fraction_leaf, max_features, random_state, max_leaf_nodes,
min_impurity_decrease, min_impurity_split, class_weight, presort)
classifier.fit(table[feature_cols], table[label_col],
sample_weight, check_input, X_idx_sorted)
try:
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydotplus
dot_data = StringIO()
export_graphviz(classifier, out_file=dot_data,
feature_names=feature_cols, class_names=table[label_col].astype('str').unique(),
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
from brightics.common.report import png2MD
fig_tree = png2MD(graph.create_png())
except:
fig_tree = "Graphviz is needed to draw a Decision Tree graph. Please download it from http://graphviz.org/download/ and install it to your computer."
# json
model = _model_dict('decision_tree_classification_model')
model['feature_cols'] = feature_cols
model['label_col'] = label_col
model['classes'] = classifier.classes_
feature_importance = classifier.feature_importances_
model['feature_importance'] = feature_importance
model['max_features'] = classifier.max_features_
model['n_classes'] = classifier.n_classes_
model['n_features'] = classifier.n_features_
model['n_outputs'] = classifier.n_outputs_
model['tree'] = classifier.tree_
get_param = classifier.get_params()
model['parameters'] = get_param
model['classifier'] = classifier
# report
indices = np.argsort(feature_importance)
sorted_feature_cols = np.array(feature_cols)[indices]
plt.title('Feature Importances')
plt.barh(range(len(indices)), feature_importance[indices], color='b', align='center')
for i, v in enumerate(feature_importance[indices]):
plt.text(v, i, " {:.2f}".format(v), color='b', va='center', fontweight='bold')
plt.yticks(range(len(indices)), sorted_feature_cols)
plt.xlabel('Relative Importance')
plt.xlim(0, 1.1)
plt.tight_layout()
fig_feature_importances = plt2MD(plt)
plt.clf()
params = dict2MD(get_param)
feature_importance_df = | pd.DataFrame(data=feature_importance, index=feature_cols) | pandas.DataFrame |
def hover(x):
index=x.find(".")
if index==-1: return x
else: return x[:index]
def morph(x):
index=x.find(".")
if index==-1: return ""
else: return x[index+1:]
def stransform(inputw):
if len(inputw)>0 and inputw[0]=="[":
return " ʔăḏōnāy"
elif len(inputw)>1 and inputw[0]==inputw[1]:
return "-"+inputw[0]+"-"+inputw[1:]
else:
return inputw
def septransform(inputw):
if inputw=="":
return ""
if inputw=="־":
return ""
if inputw==" ":
return " "
if inputw=="׃ ":
return ""
else:
return """<span style="font-size: 157%;padding:15px;background: blueviolet;">{}</span>""".format(inputw)
def beautify(phon):
return phon.replace("ḏ","d").replace("ḡ","g").replace("ṯ","t").replace("ḵ","x").replace("ʔ","ʾ").replace("ʕ","ʿ").replace("ₐ","a").replace("î","ī").replace("ê","ē").replace("ô","ō").replace("û","ū").replace("ᵒ","ŏ").replace("ᵉ","ĕ").replace("ᵃ","ă").replace("ᵊ","ᵉ").replace("ʸ","").replace("ˈ",'<sub id="s">́</sub>').replace(" "," ").replace(" -","-")
def repl(inputw):
text=beautify(inputw)
return text
import pandas as pd
print('Loading verses...\r',end="")
df=pd.read_csv("../_data/bible.csv",sep="\t")
ixv=pd.read_csv("../_data/indexv.csv",sep="\t",header=None)
print('Loading words...\r',end="")
dfw=pd.read_csv("../_data/byword.csv",sep="\t").fillna(" ")
dfw["trans1"]=dfw["trans1"].apply(beautify)
#dfw["wordcat"]=dfwt +dfw["separ"].apply(septransform)
#words=dfw.fillna("").groupby("WLCverse")["wordcat"].apply(list).apply("".join).apply(repl)
glosstr0= | pd.DataFrame() | pandas.DataFrame |
# Copyright 2017 Regents of the University of Colorado. All Rights Reserved.
# Released under the MIT license.
# This software was developed at the University of Colorado's Laboratory for Atmospheric and Space Physics.
# Verify current version before use at: https://github.com/MAVENSDC/Pydivide
import calendar
import numpy as np
from .utilities import kp_regex
from .utilities import param_dict
from .utilities import remove_inst_tag
from .utilities import get_latest_files_from_date_range, read_iuvs_file, get_latest_iuvs_files_from_date_range
from .utilities import get_header_info
from .utilities import orbit_time
import pytplot
from _collections import OrderedDict
import builtins
import os
def maven_kp_to_tplot(filename=None, input_time=None, instruments=None, insitu_only=False, specified_files_only=False, ancillary_only=False):
'''
    Read a given in situ KP file into a dictionary object.
    Optional keywords may be used to downselect the instruments returned
and the time windows.
Input:
filename:
Name of the in situ KP file(s) to read in.
input_time:
            Set a time bound/filter on the data
(this will be necessary when this is called by a wrapper that
seeks to ingest all data within a range of dates that may
be allowed to span multiple days (files) ).
instruments:
Optional keyword listing the instruments to include
in the returned dictionary/structure.
insitu_only:
Optional keyword that allows you to specify that you only want
        to load in situ files (and not IUVS files).
specified_files_only:
Optional keyword that allows you to specify you only want filenames
given in 'filename' to be read in, not other files close in date/time
as well.
ancillary_only:
Will only load in the spacecraft and APP info
Output:
A dictionary (data structure) containing up to all of the columns
included in a MAVEN in-situ Key parameter data file.
'''
import pandas as pd
import re
from datetime import datetime, timedelta
from dateutil.parser import parse
filenames = []
iuvs_filenames = []
if instruments is not None:
if not isinstance(instruments, builtins.list):
instruments = [instruments]
if filename is None and input_time is None:
print('You must specify either a set of filenames to read in, or a time frame in which '
'you want to search for downloaded files.')
if ancillary_only:
instruments=['SPACECRAFT']
if filename is not None:
if not isinstance(filename, builtins.list):
filename = [filename]
dates = []
for file in filename:
date = re.findall(r'_(\d{8})', file)[0]
dates.append(date)
if 'iuvs' in file:
iuvs_filenames.append(file)
else:
filenames.append(file)
dates.sort()
    # To keep the rest of the code consistent, if someone gave a file, or files, to load, but no input_time,
# go ahead and create an 'input_time'
if input_time is None:
if len(dates) == 1:
input_time = str(dates[0][:4]) + '-' + str(dates[0][4:6]) + '-' + str(dates[0][6:])
else:
beg_date = min(dates)
end_date = max(dates)
input_time = [str(beg_date[:4]) + '-' + str(beg_date[4:6]) + '-' + str(beg_date[6:]),
str(end_date[:4]) + '-' + str(end_date[4:6]) + '-' + str(end_date[6:])]
# Check for orbit num rather than time string
if isinstance(input_time, builtins.list):
if isinstance(input_time[0], int):
input_time = orbit_time(input_time[0], input_time[1])
elif isinstance(input_time, int):
input_time = orbit_time(input_time)
# Turn string input into datetime objects
if isinstance(input_time, list):
if len(input_time[0]) <= 10:
input_time[0] = input_time[0] + ' 00:00:00'
if len(input_time[1]) <= 10:
input_time[1] = input_time[1] + ' 23:59:59'
date1 = parse(input_time[0])
date2 = parse(input_time[1])
else:
if len(input_time) <= 10:
input_time += ' 00:00:00'
date1 = parse(input_time)
date2 = date1 + timedelta(days=1)
date1_unix = calendar.timegm(date1.timetuple())
date2_unix = calendar.timegm(date2.timetuple())
# Grab insitu and iuvs files for the specified/created date ranges
date_range_filenames = get_latest_files_from_date_range(date1, date2)
date_range_iuvs_filenames = get_latest_iuvs_files_from_date_range(date1, date2)
# Add date range files to respective file lists if desired
if not specified_files_only:
filenames.extend(date_range_filenames)
iuvs_filenames.extend(date_range_iuvs_filenames)
if not date_range_filenames and not date_range_iuvs_filenames:
if not filenames and not iuvs_filenames:
print("No files found for the input date range, and no specific filenames were given. Exiting.")
return
# Going to look for files between time frames, but as we might have already specified
# certain files to load in, we don't want to load them in 2x... so doing a check for that here
filenames = list(set(filenames))
iuvs_filenames = list(set(iuvs_filenames))
kp_insitu = []
if filenames:
# Get column names
names, inst = [], []
crus_name, crus_inst = [], []
c_found = False
r_found = False
for f in filenames:
if kp_regex.match(os.path.basename(f)).group('description') == '_crustal' and not c_found:
name, inss = get_header_info(f)
# Strip off the first name for now (Time), and use that as the dataframe index.
# Seems to make sense for now, but will it always?
crus_name.extend(name[1:])
crus_inst.extend(inss[1:])
c_found = True
elif kp_regex.match(os.path.basename(f)).group('description') == '' and not r_found:
name, ins = get_header_info(f)
# Strip off the first name for now (Time), and use that as the dataframe index.
# Seems to make sense for now, but will it always?
names.extend(name[1:])
inst.extend(ins[1:])
r_found = True
all_names = names + crus_name
all_inst = inst + crus_inst
# Break up dictionary into instrument groups
lpw_group, euv_group, swe_group, swi_group, sta_group, sep_group, mag_group, ngi_group, app_group, sc_group, \
crus_group = [], [], [], [], [], [], [], [], [], [], []
for i, j in zip(all_inst, all_names):
if re.match('^LPW$', i.strip()):
lpw_group.append(j)
elif re.match('^LPW-EUV$', i.strip()):
euv_group.append(j)
elif re.match('^SWEA$', i.strip()):
swe_group.append(j)
elif re.match('^SWIA$', i.strip()):
swi_group.append(j)
elif re.match('^STATIC$', i.strip()):
sta_group.append(j)
elif re.match('^SEP$', i.strip()):
sep_group.append(j)
elif re.match('^MAG$', i.strip()):
mag_group.append(j)
elif re.match('^NGIMS$', i.strip()):
ngi_group.append(j)
elif re.match('^MODELED_MAG$', i.strip()):
crus_group.append(j)
elif re.match('^SPICE$', i.strip()):
# NB Need to split into APP and SPACECRAFT
if re.match('(.+)APP(.+)', j):
app_group.append(j)
else: # Everything not APP is SC in SPICE
# But do not include Orbit Num, or IO Flag
# Could probably stand to clean this line up a bit
if not re.match('(.+)(Orbit Number|Inbound Outbound Flag)', j):
sc_group.append(j)
else:
pass
delete_groups = []
if instruments is not None:
if 'LPW' not in instruments and 'lpw' not in instruments:
delete_groups += lpw_group
if 'MAG' not in instruments and 'mag' not in instruments:
delete_groups += mag_group
if 'EUV' not in instruments and 'euv' not in instruments:
delete_groups += euv_group
if 'SWI' not in instruments and 'swi' not in instruments:
delete_groups += swi_group
if 'SWE' not in instruments and 'swe' not in instruments:
delete_groups += swe_group
if 'NGI' not in instruments and 'ngi' not in instruments:
delete_groups += ngi_group
if 'SEP' not in instruments and 'sep' not in instruments:
delete_groups += sep_group
if 'STA' not in instruments and 'sta' not in instruments:
delete_groups += sta_group
if 'MODELED_MAG' not in instruments and 'modeled_mag' not in instruments:
delete_groups += crus_group
        # Read in all relevant data into a pandas dataframe called "temp"
temp_data = []
filenames.sort()
for filename in filenames:
# Determine number of header lines
nheader = 0
with open(filename) as f:
for line in f:
if line.startswith('#'):
nheader += 1
if kp_regex.match(os.path.basename(filename)).group('description') == '_crustal':
temp_data.append(pd.read_fwf(filename, skiprows=nheader, index_col=0,
widths=[19] + len(crus_name) * [16], names=crus_name))
else:
temp_data.append(pd.read_fwf(filename, skiprows=nheader, index_col=0,
widths=[19] + len(names) * [16], names=names))
for i in delete_groups:
del temp_data[-1][i]
temp_unconverted = | pd.concat(temp_data, axis=0, sort=True) | pandas.concat |
import os
from urllib.request import urlretrieve
import pandas as pd
Fremont_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename="Fremont.csv",url=Fremont_URL
,force_download=False):
"""
    Download and cache the Fremont data
Parameters
----------
    filename: string (optional)
location to save the data
    url: string (optional)
web location of the data
    force_download: bool (optional)
if True, force redownload of the data
Returns
-------
data: pandas.DataFrame
The fremont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url,filename)
data = pd.read_csv("Fremont.csv",index_col = "Date").drop("Fremont Bridge Total",axis=1)
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except:
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
import os, sys, json, warnings, logging as log
import pandas as pd, tqdm, dpath
import annotate, collect
from pprint import pprint
def make_items(iter_labeled_meta, iter_all_meta, n_unlabeled, read_rows):
'''Generate metadata from gold-standard and unlabled'''
labeled_items = [(meta, read_rows(meta['url']))
for meta in iter_labeled_meta]
annotated_table_urls = set([meta['url'] for meta, _ in labeled_items])
unlabeled_meta = []
for meta in iter_all_meta:
if (n_unlabeled is not None) and len(unlabeled_meta) >= n_unlabeled:
break
if meta['url'] not in annotated_table_urls:
unlabeled_meta.append(meta)
unlabeled_items = [(meta, read_rows(meta['url']))
for meta in unlabeled_meta]
return labeled_items, unlabeled_items
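# Illustrative sketch (not part of the original module): tiny in-memory inputs
# showing how gold-standard tables are separated from the unlabeled pool.
def _example_make_items():
    labeled = [{'url': 'a.csv'}]
    everything = [{'url': 'a.csv'}, {'url': 'b.csv'}, {'url': 'c.csv'}]
    read_rows = lambda url: [['col1', 'col2']]
    lab, unlab = make_items(iter(labeled), iter(everything), n_unlabeled=1, read_rows=read_rows)
    # lab holds a.csv only; unlab holds b.csv (a.csv is already labeled)
    return lab, unlab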
def make_labelquery(args):
querytype, template, slots, value, templates, namespace, kbdomain, name = args
return querytype, name, annotate.make_labelquery(*args)
def parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=1):
import tqdm, multiprocessing
with multiprocessing.Pool(max_workers) as p:
stream_args = [(q['label'], q['template'], q['slots'], q['value'],
templates, namespace, kbdomain, name)
for name, q in labelqueries.items()]
t = len(stream_args)
# yield from tqdm.tqdm(p.imap_unordered(make_labelquery, stream_args), total=t)
yield from p.imap_unordered(make_labelquery, stream_args)
def cache_labelquery_results(modeldir,
namespace,
kbdomain,
selected_queries=[],
results_fname=None,
parallel=False,
verbose=False):
labelqueries, templates = annotate.load_labelqueries_templates(modeldir)
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
labelquery_results = load_labelquery_results(modeldir,
results_fname=results_fname)
l = len(labelqueries)
if parallel:
if selected_queries:
labelqueries = {
name: q
for name, q in labelqueries.items() if name in selected_queries
}
lqs = parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=parallel)
for qt, name, lq in lqs:
labelquery_results.setdefault(qt, {})[name] = lq
else:
for i, (name, q) in enumerate(labelqueries.items()):
if selected_queries and (name not in selected_queries):
continue
lq = annotate.make_labelquery(q['label'],
q['template'],
q['slots'],
q['value'],
templates,
namespace,
kbdomain=kbdomain,
name=name)
if verbose:
print(len(lq.transformations), 'results')
labelquery_results.setdefault(q['label'], {})[name] = lq
with open(results_fname, 'w') as fw:
results_json = {
label: {name: vars(lq)
for name, lq in lqs.items()}
for label, lqs in labelquery_results.items()
}
json.dump(results_json, fw, indent=2)
with open(results_fname.replace('.json', '.stats.json'), 'w') as fw:
results_json = {
name: len(lq.transformations)
for label, lqs in labelquery_results.items()
for name, lq in lqs.items()
}
json.dump(results_json, fw, indent=2)
return labelquery_results
def load_labelquery_results(modeldir, results_fname=None):
typed_labelqueries = {}
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
if os.path.exists(results_fname):
typed_labelqueries = json.load(open(results_fname))
for lq_type, labelqueries in typed_labelqueries.items():
for name, lq_params in labelqueries.items():
labelqueries[name] = annotate.LabelQuery(**lq_params)
return typed_labelqueries
def transform_all(labelqueries, unlabeled_items, model, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
id_items = {m['@id']: (m, r) for m, r in unlabeled_items}
lX = []
lq_labels = {}
l = len(labelqueries)
for i, (name, lq) in enumerate(labelqueries.items()):
print(f'Transforming using query {name:>4s} [{i+1:3d}/{l:3d}] ...',
end='\r',
file=sys.stderr)
# Get corresponding metadata for query results
selected_items = [
id_items[i] for i in lq.transformations if i in id_items
]
transformed_items = tuple(
zip(*[(lq.transform(m, r, **kwargs), r)
for m, r in selected_items]))
if transformed_items:
recs = tuple(
zip(*model.__class__.make_records(*transformed_items)))
if recs:
qlX, qly = recs
qlX = pd.DataFrame.from_records(list(qlX)).set_index('@id')
lX.append(qlX)
lq_labels[name] = pd.Series(qly, index=qlX.index)
print(file=sys.stderr)
lX = pd.concat(lX).drop_duplicates().replace([pd.np.nan], 0)
L = pd.DataFrame(index=lX.index) # rows: n_labelqueries x cols: labels
for lqname, qly in lq_labels.items():
L[lqname] = qly
return lX, L
def get_query_labelings(labeled_metas, labelqueries):
item_query_label = {}
for meta in labeled_metas:
for qid, lq in labelqueries.items():
for p, v in lq.transformations.get(meta['@id'], {}).items():
if v:
item_query_label.setdefault((meta['url'], p), {})[qid] = v
L = pd.DataFrame.from_dict(item_query_label, orient='index')
return L
def get_true_labelings(labeled_metas, eval_path):
item_truelabel = {}
for meta in labeled_metas:
for p, v in dpath.util.search(meta, eval_path, yielded=True):
if not meta.get('karma:isBad'):
item_truelabel[(meta['url'], p)] = v or None
return pd.Series(item_truelabel)
def to_numbered(L, y_true):
# Replace class labels by integers (nan => -1)
label_values = sorted(set(['']) | set(y_true.fillna('').values.flatten()))
value_repl = {v: i - 1 for i, v in enumerate(label_values)}
value_repl['UNK'] = -1
L = L.fillna(-1).replace(value_repl).astype('int32').values
y_true = y_true.fillna(-1).replace(value_repl).astype('int32').values
return L, y_true
def from_numbered(L, y_true, y_pred):
label_values = sorted(set(['']) | set(y_true.fillna('').values.flatten()))
repl_value = {i - 1: v for i, v in enumerate(label_values)}
y_pred = pd.Series(y_pred, index=L.index)
return y_pred.replace(repl_value).replace({'': pd.np.nan})
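# Illustrative mapping (assuming y_true holds the labels 'A' and 'B'):
# label_values becomes ['', 'A', 'B'], so '' (missing/UNK) maps to -1, 'A' to 0 and
# 'B' to 1; from_numbered applies the inverse mapping and turns '' back into NaN.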
def labelmodel_predict(L_train, y_true, L_test, return_probs=False, **kwargs):
kwargs.setdefault('n_epochs', 500)
kwargs.setdefault('log_freq', 100)
from snorkel.labeling.model import LabelModel
n = len(set(y_true[~y_true.isna()].values))
log.info('y_true values: %s', set(y_true[~y_true.isna()].values))
label_model = LabelModel(cardinality=n, verbose=True)
L_train_val = set(L_train.values.flatten())
y_true_val = set(y_true.values.flatten())
log.info('Values in L_train but not y_true: %s', L_train_val - y_true_val)
log.info('Values in y_true but not L_train: %s', y_true_val - L_train_val)
L_train, Y_dev = to_numbered(L_train, y_true)
log.info('L_train values: %s, %s', set(L_train.flatten()), type(L_train))
log.info('Y_dev values: %s, %s', set(Y_dev.flatten()), type(Y_dev))
log.info('kwargs: %s', kwargs)
label_model.fit(L_train=L_train, Y_dev=Y_dev[Y_dev != -1], **kwargs)
y_pred = label_model.predict(to_numbered(L_test, y_true)[0],
return_probs=return_probs)
if return_probs:
y_pred, y_score = y_pred
y_pred = from_numbered(L_test, y_true, y_pred)
return (y_pred, y_score) if return_probs else y_pred
def save_query_analysis(modeldir, labeled_metas, labelquery_type):
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'), exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'gold.json')
typed_labelqueries = load_labelquery_results(modeldir,
results_fname=results_fname)
eval_path = annotate.labelquery_types[labelquery_type]['eval_path']
y_true = get_true_labelings(labeled_metas, eval_path)
L = get_query_labelings(labeled_metas, typed_labelqueries[labelquery_type])
L = L.reindex(index=y_true.index)
unk_value = annotate.labelquery_types[labelquery_type]['UNK']
L['-1'] = unk_value
L = L.replace({'UNK': unk_value})
y_true = y_true.replace({None: unk_value}).fillna(unk_value)
for modelname in ['-majority', '']:
        import importlib.util
if importlib.util.find_spec("snorkel") is None:
y_pred = L.mode(axis=1)[0]
modelname = ''
else:
if modelname:
y_pred = L.mode(axis=1)[0]
else:
log.info(f'Fitting label model for {labelquery_type}...')
y_pred = labelmodel_predict(L, y_true, L)
name = f'{labelquery_type}{modelname}'
y = pd.DataFrame({'true': y_true, 'pred': y_pred}).fillna(unk_value)
y.to_csv(
os.path.join(modeldir, 'labelqueries', 'cache',
f'{name}-labeling.csv'))
target_names = [
x.split('/')[-1] for x in sorted(set(y.true) | set(y.pred))
]
import analyse
report = analyse.pd_report(y.true, y.pred, target_names)
log.info('Accuracy (%s): %.2f', name, report.loc['accuracy',
'f1-score'])
report.index.name = 'label'
report.to_csv(
os.path.join(modeldir, 'labelqueries', 'cache',
f'{name}-report.csv'))
confusion = analyse.pd_confusion(y.true,
y.pred,
target_names,
stack=True)
confusion.to_csv(
os.path.join(modeldir, 'labelqueries', 'cache',
f'{name}-confusion.csv'))
def sample_match_dist(y, y_match, subsample=1, **kwargs):
y, y_match = pd.Series(y), | pd.Series(y_match) | pandas.Series |
""" merge predictions and generate submission.
"""
import os
import sys
import glob
from pathlib import Path
import argparse
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from torch.nn import functional as F
from albumentations import Compose, Normalize
from albumentations import HorizontalFlip, PadIfNeeded
from metric import iou_metric
import dataset
import models
import utils
from super_pool import SuperPool
def load_train_mask(image_id):
mask = cv2.imread(os.path.join('../input/train/masks', '%s.png' % image_id), 0)
return (mask / 255.0).astype(np.uint8)
def iou_metric_batch(y_true_in, y_pred_in):
batch_size = y_true_in.shape[0]
metric = []
for batch in range(batch_size):
value = iou_metric(y_true_in[batch], y_pred_in[batch])
metric.append(value)
return np.mean(metric)
# ref.: https://www.kaggle.com/stainsby/fast-tested-rle
def rle_encode(img):
'''
img: numpy array, 1 - mask, 0 - background
    Returns run length as a formatted string
'''
pixels = img.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
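# Illustrative example (row-major flattening, 1-indexed "start length" pairs):
#   mask = np.array([[0, 1, 1],
#                    [0, 0, 1]], dtype=np.uint8)
#   rle_encode(mask) -> '2 2 6 1'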
def generate_submission(out_csv, preds):
pool = SuperPool()
sample_df = | pd.read_csv('../input/sample_submission.csv') | pandas.read_csv |
import random
import math
import numpy as np
import pygeos
import pandas as pd
# Smallest enclosing circle - Library (Python)
# Copyright (c) 2017 Project Nayuki
# https://www.nayuki.io/page/smallest-enclosing-circle
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program (see COPYING.txt and COPYING.LESSER.txt).
# If not, see <http://www.gnu.org/licenses/>.
# Data conventions: A point is a pair of floats (x, y). A circle is a triple of floats (center x, center y, radius).
# Returns the smallest circle that encloses all the given points. Runs in expected O(n) time, randomized.
# Input: A sequence of pairs of floats or ints, e.g. [(0,5), (3.1,-2.7)].
# Output: A triple of floats representing a circle.
# Note: If 0 points are given, None is returned. If 1 point is given, a circle of radius 0 is returned.
#
# Initially: No boundary points known
def _make_circle(points):
# Convert to float and randomize order
shuffled = [(float(x), float(y)) for (x, y) in points]
random.shuffle(shuffled)
# Progressively add points to circle or recompute circle
c = None
for (i, p) in enumerate(shuffled):
if c is None or not _is_in_circle(c, p):
c = _make_circle_one_point(shuffled[: i + 1], p)
return c
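# Illustrative example of the conventions above:
#   _make_circle([(0.0, 0.0), (2.0, 0.0), (1.0, 1.0)]) -> (1.0, 0.0, 1.0),
#   i.e. a circle centred at (1.0, 0.0) with radius 1.0 encloses all three points.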
# One boundary point known
def _make_circle_one_point(points, p):
c = (p[0], p[1], 0.0)
for (i, q) in enumerate(points):
if not _is_in_circle(c, q):
if c[2] == 0.0:
c = _make_diameter(p, q)
else:
c = _make_circle_two_points(points[: i + 1], p, q)
return c
# Two boundary points known
def _make_circle_two_points(points, p, q):
circ = _make_diameter(p, q)
left = None
right = None
px, py = p
qx, qy = q
# For each point not in the two-point circle
for r in points:
if _is_in_circle(circ, r):
continue
# Form a circumcircle and classify it on left or right side
cross = _cross_product(px, py, qx, qy, r[0], r[1])
c = _make_circumcircle(p, q, r)
if c is None:
continue
elif cross > 0.0 and (
left is None
or _cross_product(px, py, qx, qy, c[0], c[1])
> _cross_product(px, py, qx, qy, left[0], left[1])
):
left = c
elif cross < 0.0 and (
right is None
or _cross_product(px, py, qx, qy, c[0], c[1])
< _cross_product(px, py, qx, qy, right[0], right[1])
):
right = c
# Select which circle to return
if left is None and right is None:
return circ
if left is None:
return right
if right is None:
return left
if left[2] <= right[2]:
return left
return right
def _make_circumcircle(p0, p1, p2):
# Mathematical algorithm from Wikipedia: Circumscribed circle
ax, ay = p0
bx, by = p1
cx, cy = p2
ox = (min(ax, bx, cx) + max(ax, bx, cx)) / 2.0
oy = (min(ay, by, cy) + max(ay, by, cy)) / 2.0
ax -= ox
ay -= oy
bx -= ox
by -= oy
cx -= ox
cy -= oy
d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0
if d == 0.0:
return None
x = (
ox
+ (
(ax * ax + ay * ay) * (by - cy)
+ (bx * bx + by * by) * (cy - ay)
+ (cx * cx + cy * cy) * (ay - by)
)
/ d
)
y = (
oy
+ (
(ax * ax + ay * ay) * (cx - bx)
+ (bx * bx + by * by) * (ax - cx)
+ (cx * cx + cy * cy) * (bx - ax)
)
/ d
)
ra = math.hypot(x - p0[0], y - p0[1])
rb = math.hypot(x - p1[0], y - p1[1])
rc = math.hypot(x - p2[0], y - p2[1])
return (x, y, max(ra, rb, rc))
def _make_diameter(p0, p1):
cx = (p0[0] + p1[0]) / 2.0
cy = (p0[1] + p1[1]) / 2.0
r0 = math.hypot(cx - p0[0], cy - p0[1])
r1 = math.hypot(cx - p1[0], cy - p1[1])
return (cx, cy, max(r0, r1))
_MULTIPLICATIVE_EPSILON = 1 + 1e-14
def _is_in_circle(c, p):
return (
c is not None
and math.hypot(p[0] - c[0], p[1] - c[1]) <= c[2] * _MULTIPLICATIVE_EPSILON
)
# Returns twice the signed area of the triangle defined by (x0, y0), (x1, y1), (x2, y2).
def _cross_product(x0, y0, x1, y1, x2, y2):
return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)
# end of Nayuiki script to define the smallest enclosing circle
# calculate the area of circumcircle
def _circle_area(points):
if len(points[0]) == 3:
points = [x[:2] for x in points]
circ = _make_circle(points)
return math.pi * circ[2] ** 2
def _circle_radius(points):
if len(points[0]) == 3:
points = [x[:2] for x in points]
circ = _make_circle(points)
return circ[2]
def _true_angle(a, b, c):
    # return True if the angle at vertex b deviates enough from a straight line
    # (<= 170 or >= 190 degrees) to count as a real corner
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
if np.degrees(angle) <= 170:
return True
if np.degrees(angle) >= 190:
return True
return False
def get_corners(geom):
# count corners of geom
if geom is None:
return None
corners = 0 # define empty variables
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
if _true_angle(a, b, c) is True:
corners = corners + 1
else:
continue
return corners
def squareness(geom):
if geom is None:
return None
def _angle(a, b, c):
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.degrees(np.arccos(cosine_angle))
return angle
angles = []
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
ang = _angle(a, b, c)
if ang <= 175:
angles.append(ang)
elif _angle(a, b, c) >= 185:
angles.append(ang)
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
ang = _angle(a, b, c)
if _angle(a, b, c) <= 175:
angles.append(ang)
elif _angle(a, b, c) >= 185:
angles.append(ang)
else:
continue
deviations = [abs(90 - i) for i in angles]
return np.mean(deviations)
def elongation(bbox):
a = bbox.area
p = bbox.length
cond1 = p ** 2
cond2 = 16 * a
bigger = cond1 >= cond2
sqrt = np.empty(len(a))
sqrt[bigger] = cond1[bigger] - cond2[bigger]
sqrt[~bigger] = 0
elo1 = ((p - np.sqrt(sqrt)) / 4) / ((p / 2) - ((p - np.sqrt(sqrt)) / 4))
elo2 = ((p + np.sqrt(sqrt)) / 4) / ((p / 2) - ((p + np.sqrt(sqrt)) / 4))
# use the smaller one (e.g. shorter/longer)
res = np.empty(len(a))
res[elo1 <= elo2] = elo1[elo1 <= elo2]
res[~(elo1 <= elo2)] = elo2[~(elo1 <= elo2)]
return res
def centroid_corner(geom):
'''all these characters working with corners could be merged and cleaned
'''
from shapely.geometry import Point
if geom is None:
return (None, None)
distances = [] # set empty list of distances
centroid = geom.centroid # define centroid
points = list(geom.exterior.coords) # get points of a shape
stop = len(points) - 1 # define where to stop
for i in np.arange(
len(points)
): # for every point, calculate angle and add 1 if True angle
if i == 0:
continue
elif i == stop:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[1])
p = Point(points[i])
if _true_angle(a, b, c) is True:
distance = centroid.distance(
p
) # calculate distance point - centroid
distances.append(distance) # add distance to the list
else:
continue
else:
a = np.asarray(points[i - 1])
b = np.asarray(points[i])
c = np.asarray(points[i + 1])
p = Point(points[i])
if _true_angle(a, b, c) is True:
distance = centroid.distance(p)
distances.append(distance)
else:
continue
if not distances: # circular buildings
from momepy.dimension import _longest_axis
if geom.has_z:
coords = [
(coo[0], coo[1]) for coo in geom.convex_hull.exterior.coords
]
else:
coords = geom.convex_hull.exterior.coords
return (_longest_axis(coords) / 2, 0)
return (np.mean(distances), np.std(distances))
def _azimuth(point1, point2):
"""azimuth between 2 shapely points (interval 0 - 180)"""
angle = np.arctan2(point2[0] - point1[0], point2[1] - point1[1])
return np.degrees(angle) if angle > 0 else np.degrees(angle) + 180
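# e.g. _azimuth((0, 0), (1, 1)) -> 45.0 and _azimuth((0, 0), (-1, -1)) -> 45.0,
# since opposite bearings collapse onto the same 0-180 degree interval.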
def _dist(a, b):
return math.hypot(b[0] - a[0], b[1] - a[1])
def solar_orientation_poly(bbox):
if bbox is None:
return None
bbox = list(bbox.exterior.coords)
axis1 = _dist(bbox[0], bbox[3])
axis2 = _dist(bbox[0], bbox[1])
if axis1 <= axis2:
az = _azimuth(bbox[0], bbox[1])
else:
az = _azimuth(bbox[0], bbox[3])
if 90 > az >= 45:
diff = az - 45
az = az - 2 * diff
elif 135 > az >= 90:
diff = az - 90
az = az - 2 * diff
diff = az - 45
az = az - 2 * diff
elif 181 > az >= 135:
diff = az - 135
az = az - 2 * diff
diff = az - 90
az = az - 2 * diff
diff = az - 45
az = az - 2 * diff
return az
def street_profile(streets, buildings, distance=3, tick_length=50):
    """Estimate the street profile: cast perpendicular ticks of ``tick_length``
    every ``distance`` map units along each street, measure the distance to the
    first intersected building on either side, and return a tuple of
    (widths, deviations, openness) per street segment."""
    pygeos_lines = streets.geometry.values.data
list_points = np.empty((0, 2))
ids = []
lengths = pygeos.length(pygeos_lines)
for ix, (line, length) in enumerate(zip(pygeos_lines, lengths)):
pts = pygeos.line_interpolate_point(
line, np.linspace(0, length, num=int((length) // distance))
) # .1 offset to keep a gap between two segments
list_points = np.append(list_points, pygeos.get_coordinates(pts), axis=0)
ids += [ix] * len(pts) * 2
ticks = []
for num, pt in enumerate(list_points, 1):
# start chainage 0
if num == 1:
angle = _getAngle(pt, list_points[num])
line_end_1 = _getPoint1(pt, angle, tick_length / 2)
angle = _getAngle(line_end_1, pt)
line_end_2 = _getPoint2(line_end_1, angle, tick_length)
ticks.append([line_end_1, pt])
ticks.append([line_end_2, pt])
# everything in between
if num < len(list_points) - 1:
angle = _getAngle(pt, list_points[num])
line_end_1 = _getPoint1(
list_points[num], angle, tick_length / 2
)
angle = _getAngle(line_end_1, list_points[num])
line_end_2 = _getPoint2(line_end_1, angle, tick_length)
ticks.append([line_end_1, list_points[num]])
ticks.append([line_end_2, list_points[num]])
# end chainage
if num == len(list_points):
angle = _getAngle(list_points[num - 2], pt)
line_end_1 = _getPoint1(pt, angle, tick_length / 2)
angle = _getAngle(line_end_1, pt)
line_end_2 = _getPoint2(line_end_1, angle, tick_length)
ticks.append([line_end_1, pt])
ticks.append([line_end_2, pt])
ticks = pygeos.linestrings(ticks)
inp, res = pygeos.STRtree(ticks).query_bulk(buildings.geometry.values.data, predicate='intersects')
intersections = pygeos.intersection(ticks[res], buildings.geometry.values.data[inp])
distances = pygeos.distance(intersections, pygeos.points(list_points[res // 2]))
dists = np.zeros((len(ticks),))
dists[:] = np.nan
dists[res] = distances
ids = np.array(ids)
widths = []
openness = []
deviations = []
for i in range(len(streets)):
f = ids == i
s = dists[f]
lefts = s[::2]
rights = s[1::2]
left_mean = np.nanmean(lefts) if ~np.isnan(lefts).all() else tick_length / 2
right_mean = np.nanmean(rights) if ~np.isnan(rights).all() else tick_length / 2
widths.append(np.mean([left_mean, right_mean]) * 2)
openness.append(np.isnan(s).sum() / (f).sum())
deviations.append(np.nanstd(s))
return (widths, deviations, openness)
# http://wikicode.wikidot.com/get-angle-of-line-between-two-points
# https://glenbambrick.com/tag/perpendicular/
# angle between two points
def _getAngle(pt1, pt2):
"""
pt1, pt2 : tuple
"""
x_diff = pt2[0] - pt1[0]
y_diff = pt2[1] - pt1[1]
return math.degrees(math.atan2(y_diff, x_diff))
# start and end points of chainage tick
# get the first end point of a tick
def _getPoint1(pt, bearing, dist):
"""
pt : tuple
"""
angle = bearing + 90
bearing = math.radians(angle)
x = pt[0] + dist * math.cos(bearing)
y = pt[1] + dist * math.sin(bearing)
return (x, y)
# get the second end point of a tick
def _getPoint2(pt, bearing, dist):
"""
pt : tuple
"""
bearing = math.radians(bearing)
x = pt[0] + dist * math.cos(bearing)
y = pt[1] + dist * math.sin(bearing)
return (x, y)
def get_edge_ratios(df, edges):
"""
    df: cells/buildings
edges: network
"""
# intersection-based join
buff = edges.buffer(0.01) # to avoid floating point error
inp, res = buff.sindex.query_bulk(df.geometry, predicate='intersects')
intersections = df.iloc[inp].reset_index(drop=True).intersection(buff.iloc[res].reset_index(drop=True))
mask = intersections.area > 0.0001
intersections = intersections[mask]
inp = inp[mask]
lengths = intersections.area
grouped = lengths.groupby(inp)
totals = grouped.sum()
ints_vect = []
for name, group in grouped:
ratios = group / totals.loc[name]
ints_vect.append({res[item[0]]: item[1] for item in ratios.iteritems()})
edge_dicts = pd.Series(ints_vect, index=totals.index)
# nearest neighbor join
nans = df.index[~df.index.isin(edge_dicts.index)]
buffered = df.loc[nans].buffer(500)
additional = []
for i in range(len(buffered)):
geom = buffered.geometry.iloc[i]
query = edges.sindex.query(geom)
b = 500
while query.size == 0:
query = edges.sindex.query(geom.buffer(b))
b += 500
additional.append({edges.iloc[query].distance(geom).idxmin(): 1})
additional = pd.Series(additional, index=nans)
return pd.concat([edge_dicts, additional]).sort_index()
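# Illustrative usage (variable names are hypothetical): for GeoDataFrames of
# tessellation cells and street edges, each cell gets a dict mapping edge index to
# the share of its overlap with the buffered edges, e.g. {12: 0.8, 13: 0.2}; cells
# intersecting no edge fall back to the nearest edge with a ratio of 1.
#   cell_edge_ratios = get_edge_ratios(tessellation, streets)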
def get_nodes(df, nodes, edges, node_id, edge_id, startID, endID):
nodes = nodes.set_index('nodeID')
node_ids = []
for edge_dict, geom in zip(df[edge_id], df.geometry):
edge = edges.iloc[max(edge_dict, key=edge_dict.get)]
startID = edge.node_start
start = nodes.loc[startID].geometry
sd = geom.distance(start)
endID = edge.node_end
end = nodes.loc[endID].geometry
ed = geom.distance(end)
if sd > ed:
node_ids.append(endID)
else:
node_ids.append(startID)
return | pd.Series(node_ids, index=df.index) | pandas.Series |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
# AUTHORS: <NAME> and <NAME>
# CONTACT: <EMAIL>, <EMAIL>
# thanks to <NAME> for his help at coding
# --------------------------- LIBRARIES
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve
from matplotlib import pyplot as plt
np.set_printoptions(linewidth=2000, edgeitems=1000)
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
from sklearn.metrics import confusion_matrix, classification_report
from matplotlib.colors import LinearSegmentedColormap
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.pyplot import figure
import os
import warnings
warnings.filterwarnings("ignore")
def heatconmat(y_true, y_pred, homepath, mode="binary"):
cmap_reds = plt.get_cmap("Reds")
num_colors = 50
colors = ["white", "grey"] + [cmap_reds(i / num_colors) for i in range(2, num_colors)]
cmap2 = LinearSegmentedColormap.from_list('', colors, num_colors)
sns.set_context('talk')
df = | pd.Series(y_true) | pandas.Series |
#!/usr/bin/env python3
import argparse
import collections
import copy
import datetime
import functools
import glob
import json
import logging
import math
import operator
import os
import os.path
import re
import sys
import typing
import warnings
import matplotlib
import matplotlib.cm
import matplotlib.dates
import matplotlib.pyplot
import matplotlib.ticker
import networkx
import numpy
import pandas
import tabulate
import tqdm
import rows.console
import rows.load
import rows.location_finder
import rows.model.area
import rows.model.carer
import rows.model.datetime
import rows.model.historical_visit
import rows.model.history
import rows.model.json
import rows.model.location
import rows.model.metadata
import rows.model.past_visit
import rows.model.problem
import rows.model.rest
import rows.model.schedule
import rows.model.service_user
import rows.model.visit
import rows.parser
import rows.plot
import rows.routing_server
import rows.settings
import rows.sql_data_source
def handle_exception(exc_type, exc_value, exc_traceback):
"""Logs uncaught exceptions"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
else:
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
__COMMAND = 'command'
__PULL_COMMAND = 'pull'
__INFO_COMMAND = 'info'
__SHOW_WORKING_HOURS_COMMAND = 'show-working-hours'
__COMPARE_BOX_PLOTS_COMMAND = 'compare-box-plots'
__COMPARE_DISTANCE_COMMAND = 'compare-distance'
__COMPARE_WORKLOAD_COMMAND = 'compare-workload'
__COMPARE_QUALITY_COMMAND = 'compare-quality'
__COMPARE_COST_COMMAND = 'compare-cost'
__CONTRAST_WORKLOAD_COMMAND = 'contrast-workload'
__COMPARE_PREDICTION_ERROR_COMMAND = 'compare-prediction-error'
__COMPARE_BENCHMARK_COMMAND = 'compare-benchmark'
__COMPARE_BENCHMARK_TABLE_COMMAND = 'compare-benchmark-table'
__COMPARE_LITERATURE_TABLE_COMMAND = 'compare-literature-table'
__COMPARE_THIRD_STAGE_PLOT_COMMAND = 'compare-third-stage-plot'
__COMPARE_THIRD_STAGE_TABLE_COMMAND = 'compare-third-stage-table'
__COMPARE_THIRD_STAGE_SUMMARY_COMMAND = 'compare-third-stage-summary'
__COMPARE_QUALITY_OPTIMIZER_COMMAND = 'compare-quality-optimizer'
__COMPUTE_RISKINESS_COMMAND = 'compute-riskiness'
__COMPARE_DELAY_COMMAND = 'compare-delay'
__TYPE_ARG = 'type'
__ACTIVITY_TYPE = 'activity'
__VISITS_TYPE = 'visits'
__COMPARE_TRACE_COMMAND = 'compare-trace'
__CONTRAST_TRACE_COMMAND = 'contrast-trace'
__COST_FUNCTION_TYPE = 'cost_function'
__DEBUG_COMMAND = 'debug'
__AREA_ARG = 'area'
__FROM_ARG = 'from'
__TO_ARG = 'to'
__FILE_ARG = 'file'
__DATE_ARG = 'date'
__BASE_FILE_ARG = 'base-file'
__CANDIDATE_FILE_ARG = 'candidate-file'
__SOLUTION_FILE_ARG = 'solution'
__PROBLEM_FILE_ARG = 'problem'
__OUTPUT_PREFIX_ARG = 'output_prefix'
__OPTIONAL_ARG_PREFIX = '--'
__BASE_SCHEDULE_PATTERN = 'base_schedule_pattern'
__CANDIDATE_SCHEDULE_PATTERN = 'candidate_schedule_pattern'
__SCHEDULE_PATTERNS = 'schedule_patterns'
__LABELS = 'labels'
__OUTPUT = 'output'
__ARROWS = 'arrows'
__FILE_FORMAT_ARG = 'output_format'
__color_map = matplotlib.pyplot.get_cmap('tab20c')
FOREGROUND_COLOR = __color_map.colors[0]
FOREGROUND_COLOR2 = 'black'
def get_or_raise(obj, prop):
value = getattr(obj, prop)
if not value:
raise ValueError('{0} not set'.format(prop))
return value
def get_date_time(value):
date_time = datetime.datetime.strptime(value, '%Y-%m-%d')
return date_time
def get_date(value):
value_to_use = get_date_time(value)
return value_to_use.date()
def configure_parser():
parser = argparse.ArgumentParser(prog=sys.argv[0],
description='Robust Optimization '
'for Workforce Scheduling command line utility')
subparsers = parser.add_subparsers(dest=__COMMAND)
pull_parser = subparsers.add_parser(__PULL_COMMAND)
pull_parser.add_argument(__AREA_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FROM_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TO_ARG)
pull_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT_PREFIX_ARG)
info_parser = subparsers.add_parser(__INFO_COMMAND)
info_parser.add_argument(__FILE_ARG)
compare_distance_parser = subparsers.add_parser(__COMPARE_DISTANCE_COMMAND)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __PROBLEM_FILE_ARG, required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __SCHEDULE_PATTERNS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __LABELS, nargs='+', required=True)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_distance_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
compare_workload_parser = subparsers.add_parser(__COMPARE_WORKLOAD_COMMAND)
compare_workload_parser.add_argument(__PROBLEM_FILE_ARG)
compare_workload_parser.add_argument(__BASE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__CANDIDATE_SCHEDULE_PATTERN)
compare_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __FILE_FORMAT_ARG, default=rows.plot.FILE_FORMAT)
debug_parser = subparsers.add_parser(__DEBUG_COMMAND)
# debug_parser.add_argument(__PROBLEM_FILE_ARG)
# debug_parser.add_argument(__SOLUTION_FILE_ARG)
compare_trace_parser = subparsers.add_parser(__COMPARE_TRACE_COMMAND)
compare_trace_parser.add_argument(__PROBLEM_FILE_ARG)
compare_trace_parser.add_argument(__FILE_ARG)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __ARROWS, type=bool, default=False)
contrast_workload_parser = subparsers.add_parser(__CONTRAST_WORKLOAD_COMMAND)
contrast_workload_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_workload_parser.add_argument(__BASE_FILE_ARG)
contrast_workload_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_workload_parser.add_argument(__OPTIONAL_ARG_PREFIX + __TYPE_ARG)
compare_prediction_error_parser = subparsers.add_parser(__COMPARE_PREDICTION_ERROR_COMMAND)
compare_prediction_error_parser.add_argument(__BASE_FILE_ARG)
compare_prediction_error_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser = subparsers.add_parser(__CONTRAST_TRACE_COMMAND)
contrast_trace_parser.add_argument(__PROBLEM_FILE_ARG)
contrast_trace_parser.add_argument(__BASE_FILE_ARG)
contrast_trace_parser.add_argument(__CANDIDATE_FILE_ARG)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __DATE_ARG, type=get_date, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __COST_FUNCTION_TYPE, required=True)
contrast_trace_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
show_working_hours_parser = subparsers.add_parser(__SHOW_WORKING_HOURS_COMMAND)
show_working_hours_parser.add_argument(__FILE_ARG)
show_working_hours_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
compare_quality_parser = subparsers.add_parser(__COMPARE_QUALITY_COMMAND)
compare_quality_optimizer_parser = subparsers.add_parser(__COMPARE_QUALITY_OPTIMIZER_COMMAND)
compare_quality_optimizer_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_COST_COMMAND)
compare_benchmark_parser = subparsers.add_parser(__COMPARE_BENCHMARK_COMMAND)
compare_benchmark_parser.add_argument(__FILE_ARG)
subparsers.add_parser(__COMPARE_LITERATURE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_BENCHMARK_TABLE_COMMAND)
subparsers.add_parser(__COMPUTE_RISKINESS_COMMAND)
subparsers.add_parser(__COMPARE_DELAY_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_TABLE_COMMAND)
subparsers.add_parser(__COMPARE_THIRD_STAGE_PLOT_COMMAND)
compare_box_parser = subparsers.add_parser(__COMPARE_BOX_PLOTS_COMMAND)
compare_box_parser.add_argument(__PROBLEM_FILE_ARG)
compare_box_parser.add_argument(__BASE_FILE_ARG)
compare_box_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
third_stage_summary_parser = subparsers.add_parser(__COMPARE_THIRD_STAGE_SUMMARY_COMMAND)
third_stage_summary_parser.add_argument(__OPTIONAL_ARG_PREFIX + __OUTPUT)
return parser
def split_delta(delta: datetime.timedelta) -> typing.Tuple[int, int, int, int]:
days = int(delta.days)
hours = int((delta.total_seconds() - 24 * 3600 * days) // 3600)
minutes = int((delta.total_seconds() - 24 * 3600 * days - 3600 * hours) // 60)
seconds = int(delta.total_seconds() - 24 * 3600 * days - 3600 * hours - 60 * minutes)
assert hours < 24
assert minutes < 60
assert seconds < 60
return days, hours, minutes, seconds
def get_time_delta_label(total_travel_time: datetime.timedelta) -> str:
days, hours, minutes, seconds = split_delta(total_travel_time)
time = '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
if days == 0:
return time
elif days == 1:
return '1 day ' + time
else:
return '{0} days '.format(days) + time
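# Worked example:
#   split_delta(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)) -> (1, 2, 3, 4)
#   get_time_delta_label(datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)) -> '1 day 02:03:04'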
def pull(args, settings):
area_code = get_or_raise(args, __AREA_ARG)
from_raw_date = get_or_raise(args, __FROM_ARG)
to_raw_date = get_or_raise(args, __TO_ARG)
output_prefix = get_or_raise(args, __OUTPUT_PREFIX_ARG)
console = rows.console.Console()
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
location_cache = rows.location_finder.FileSystemCache(settings)
location_finder = rows.location_finder.MultiModeLocationFinder(location_cache, user_tag_finder, timeout=5.0)
data_source = rows.sql_data_source.SqlDataSource(settings, console, location_finder)
from_date_time = get_date_time(from_raw_date)
to_date_time = get_date_time(to_raw_date)
current_date_time = from_date_time
while current_date_time <= to_date_time:
schedule = data_source.get_past_schedule(rows.model.area.Area(code=area_code), current_date_time.date())
for visit in schedule.visits:
visit.visit.address = None
output_file = '{0}_{1}.json'.format(output_prefix, current_date_time.date().strftime('%Y%m%d'))
with open(output_file, 'w') as output_stream:
json.dump(schedule, output_stream, cls=rows.model.json.JSONEncoder)
current_date_time += datetime.timedelta(days=1)
def get_travel_time(schedule, user_tag_finder):
routes = schedule.routes()
total_travel_time = datetime.timedelta()
with rows.plot.create_routing_session() as session:
for route in routes:
visit_it = iter(route.visits)
current_visit = next(visit_it, None)
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
while current_visit:
prev_location = current_location
current_visit = next(visit_it, None)
if not current_visit:
break
current_location = user_tag_finder.find(int(current_visit.visit.service_user))
travel_time_sec = session.distance(prev_location, current_location)
if travel_time_sec:
total_travel_time += datetime.timedelta(seconds=travel_time_sec)
return total_travel_time
def info(args, settings):
user_tag_finder = rows.location_finder.UserLocationFinder(settings)
user_tag_finder.reload()
schedule_file = get_or_raise(args, __FILE_ARG)
schedule_file_to_use = os.path.realpath(os.path.expandvars(schedule_file))
schedule = rows.load.load_schedule(schedule_file_to_use)
carers = {visit.carer for visit in schedule.visits}
print(get_travel_time(schedule, user_tag_finder), len(carers), len(schedule.visits))
def compare_distance(args, settings):
schedule_patterns = getattr(args, __SCHEDULE_PATTERNS)
labels = getattr(args, __LABELS)
output_file = getattr(args, __OUTPUT, 'distance')
output_file_format = getattr(args, __FILE_FORMAT_ARG)
data_frame_file = 'data_frame_cache.bin'
if os.path.isfile(data_frame_file):
data_frame = pandas.read_pickle(data_frame_file)
else:
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
store = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for label, schedule_pattern in zip(labels, schedule_patterns):
for schedule_path in glob.glob(schedule_pattern):
schedule = rows.load.load_schedule(schedule_path)
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(schedule)
frame = rows.plot.get_schedule_data_frame(schedule, problem, duration_estimator, distance_estimator)
visits = frame['Visits'].sum()
carers = len(frame.where(frame['Visits'] > 0))
idle_time = frame['Availability'] - frame['Travel'] - frame['Service']
idle_time[idle_time < pandas.Timedelta(0)] = pandas.Timedelta(0)
overtime = frame['Travel'] + frame['Service'] - frame['Availability']
overtime[overtime < pandas.Timedelta(0)] = pandas.Timedelta(0)
store.append({'Label': label,
'Date': schedule.metadata.begin,
'Availability': frame['Availability'].sum(),
'Travel': frame['Travel'].sum(),
'Service': frame['Service'].sum(),
'Idle': idle_time.sum(),
'Overtime': overtime.sum(),
'Carers': carers,
'Visits': visits})
data_frame = pandas.DataFrame(store)
data_frame.sort_values(by=['Date'], inplace=True)
data_frame.to_pickle(data_frame_file)
condensed_frame = pandas.pivot(data_frame, columns='Label', values='Travel', index='Date')
condensed_frame['Improvement'] = condensed_frame['2nd Stage'] - condensed_frame['3rd Stage']
condensed_frame['RelativeImprovement'] = condensed_frame['Improvement'] / condensed_frame['2nd Stage']
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, ax = matplotlib.pyplot.subplots(1, 1, sharex=True)
try:
width = 0.20
dates = data_frame['Date'].unique()
time_delta_convert = rows.plot.TimeDeltaConverter()
indices = numpy.arange(1, len(dates) + 1, 1)
handles = []
position = 0
for color_number, label in enumerate(labels):
data_frame_to_use = data_frame[data_frame['Label'] == label]
handle = ax.bar(indices + position * width,
time_delta_convert(data_frame_to_use['Travel']),
width,
color=color_map.colors[color_number],
bottom=time_delta_convert.zero)
handles.append(handle)
position += 1
ax.yaxis_date()
yaxis_converter = rows.plot.CumulativeHourMinuteConverter()
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yaxis_converter))
ax.set_ylabel('Total Travel Time [hh:mm:ss]')
ax.set_yticks([time_delta_convert.zero + datetime.timedelta(seconds=seconds) for seconds in range(0, 30 * 3600, 4 * 3600 + 1)])
ax.set_xlabel('Day of October 2017')
translate_labels = {
'3rd Stage': '3rd Stage',
'Human Planners': 'Human Planners'
}
labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
rows.plot.add_legend(ax, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -0.25)) # , bbox_to_anchor=(0.5, -1.1)
figure.tight_layout()
figure.subplots_adjust(bottom=0.20)
rows.plot.save_figure(output_file, output_file_format)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
# figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(3, 1, sharex=True)
# try:
# width = 0.20
# dates = data_frame['Date'].unique()
# time_delta_convert = rows.plot.TimeDeltaConverter()
# indices = numpy.arange(1, len(dates) + 1, 1)
#
# handles = []
# position = 0
# for label in labels:
# data_frame_to_use = data_frame[data_frame['Label'] == label]
#
# handle = ax1.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Travel']),
# width,
# bottom=time_delta_convert.zero)
#
# ax2.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Idle']),
# width,
# bottom=time_delta_convert.zero)
#
# ax3.bar(indices + position * width,
# time_delta_convert(data_frame_to_use['Overtime']),
# width,
# bottom=time_delta_convert.zero)
#
# handles.append(handle)
# position += 1
#
# ax1.yaxis_date()
# ax1.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax1.set_ylabel('Travel Time')
#
# ax2.yaxis_date()
# ax2.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax2.set_ylabel('Idle Time')
#
# ax3.yaxis_date()
# ax3.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(rows.plot.CumulativeHourMinuteConverter()))
# ax3.set_ylabel('Total Overtime')
# ax3.set_xlabel('Day of October 2017')
#
# translate_labels = {
# '3rd Stage': 'Optimizer',
# 'Human Planners': 'Human Planners'
# }
# labels_to_use = [translate_labels[label] if label in translate_labels else label for label in labels]
#
# rows.plot.add_legend(ax3, handles, labels_to_use, ncol=3, loc='lower center', bbox_to_anchor=(0.5, -1.1))
# figure.tight_layout()
# figure.subplots_adjust(bottom=0.20)
#
# rows.plot.save_figure(output_file, output_file_format)
# finally:
# matplotlib.pyplot.cla()
# matplotlib.pyplot.close(figure)
def calculate_forecast_visit_duration(problem):
forecast_visit_duration = rows.plot.VisitDict()
for recurring_visits in problem.visits:
for local_visit in recurring_visits.visits:
forecast_visit_duration[local_visit] = local_visit.duration
return forecast_visit_duration
def compare_workload(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
base_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __BASE_SCHEDULE_PATTERN))}
base_schedule_by_date = {schedule.metadata.begin: schedule for schedule in base_schedules}
candidate_schedules = {rows.load.load_schedule(file_path): file_path
for file_path in glob.glob(getattr(args, __CANDIDATE_SCHEDULE_PATTERN))}
candidate_schedule_by_date = {schedule.metadata.begin: schedule for schedule in candidate_schedules}
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
output_file_format = getattr(args, __FILE_FORMAT_ARG)
dates = set(candidate_schedule_by_date.keys())
for date in base_schedule_by_date.keys():
dates.add(date)
dates = list(dates)
dates.sort()
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for date in dates:
base_schedule = base_schedule_by_date.get(date, None)
if not base_schedule:
logging.error('No base schedule is available for %s', date)
continue
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(base_schedule)
candidate_schedule = candidate_schedule_by_date.get(date, None)
if not candidate_schedule:
logging.error('No candidate schedule is available for %s', date)
continue
base_schedule_file = base_schedules[base_schedule]
base_schedule_data_frame = rows.plot.get_schedule_data_frame(base_schedule, problem, duration_estimator, distance_estimator)
base_schedule_stem, base_schedule_ext = os.path.splitext(os.path.basename(base_schedule_file))
rows.plot.save_workforce_histogram(base_schedule_data_frame, base_schedule_stem, output_file_format)
candidate_schedule_file = candidate_schedules[candidate_schedule]
candidate_schedule_data_frame = rows.plot.get_schedule_data_frame(candidate_schedule, problem, duration_estimator, distance_estimator)
candidate_schedule_stem, candidate_schedule_ext \
= os.path.splitext(os.path.basename(candidate_schedule_file))
rows.plot.save_workforce_histogram(candidate_schedule_data_frame,
candidate_schedule_stem,
output_file_format)
rows.plot.save_combined_histogram(candidate_schedule_data_frame,
base_schedule_data_frame,
['2nd Stage', '3rd Stage'],
'contrast_workforce_{0}_combined'.format(date),
output_file_format)
def contrast_workload(args, settings):
__WIDTH = 0.35
__FORMAT = 'svg'
plot_type = getattr(args, __TYPE_ARG, None)
if plot_type != __ACTIVITY_TYPE and plot_type != __VISITS_TYPE:
raise ValueError(
'Unknown plot type: {0}. Use either {1} or {2}.'.format(plot_type, __ACTIVITY_TYPE, __VISITS_TYPE))
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
base_schedule = rows.load.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.load.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
if base_schedule.metadata.begin != candidate_schedule.metadata.begin:
raise ValueError('Schedules begin at a different date: {0} vs {1}'
.format(base_schedule.metadata.begin, candidate_schedule.metadata.begin))
if base_schedule.metadata.end != candidate_schedule.metadata.end:
raise ValueError('Schedules end at a different date: {0} vs {1}'
.format(base_schedule.metadata.end, candidate_schedule.metadata.end))
location_finder = rows.location_finder.UserLocationFinder(settings)
location_finder.reload()
diary_by_date_by_carer = collections.defaultdict(dict)
for carer_shift in problem.carers:
for diary in carer_shift.diaries:
diary_by_date_by_carer[diary.date][carer_shift.carer.sap_number] = diary
date = base_schedule.metadata.begin
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
with rows.plot.create_routing_session() as routing_session:
observed_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
base_schedule_frame = rows.plot.get_schedule_data_frame(base_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
candidate_schedule_frame = rows.plot.get_schedule_data_frame(candidate_schedule,
routing_session,
location_finder,
diary_by_date_by_carer[date],
observed_duration_by_visit)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, axis = matplotlib.pyplot.subplots()
matplotlib.pyplot.tight_layout()
try:
contrast_frame = pandas.DataFrame.merge(base_schedule_frame,
candidate_schedule_frame,
on='Carer',
how='left',
suffixes=['_Base', '_Candidate'])
contrast_frame['Visits_Candidate'] = contrast_frame['Visits_Candidate'].fillna(0)
contrast_frame['Availability_Candidate'] \
= contrast_frame['Availability_Candidate'].mask(pandas.isnull, contrast_frame['Availability_Base'])
contrast_frame['Travel_Candidate'] \
= contrast_frame['Travel_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame['Service_Candidate'] \
= contrast_frame['Service_Candidate'].mask(pandas.isnull, datetime.timedelta())
contrast_frame = contrast_frame.sort_values(
by=['Availability_Candidate', 'Service_Candidate', 'Travel_Candidate'],
ascending=False)
if plot_type == __VISITS_TYPE:
indices = numpy.arange(len(contrast_frame.index))
base_handle = axis.bar(indices, contrast_frame['Visits_Base'], __WIDTH)
candidate_handle = axis.bar(indices + __WIDTH, contrast_frame['Visits_Candidate'], __WIDTH)
axis.legend((base_handle, candidate_handle),
('Human Planners', 'Constraint Programming'), loc='best')
output_file = problem_file_name + '_contrast_visits_' + date.isoformat() + '.' + __FORMAT
elif plot_type == __ACTIVITY_TYPE:
indices = numpy.arange(len(base_schedule_frame.index))
def plot_activity_stacked_histogram(availability, travel, service, axis, width=0.35, initial_width=0.0,
color_offset=0):
time_delta_converter = rows.plot.TimeDeltaConverter()
travel_series = numpy.array(time_delta_converter(travel))
service_series = numpy.array(time_delta_converter(service))
idle_overtime_series = list(availability - travel - service)
idle_series = numpy.array(time_delta_converter(
map(lambda value: value if value.days >= 0 else datetime.timedelta(), idle_overtime_series)))
overtime_series = numpy.array(time_delta_converter(
map(lambda value: datetime.timedelta(
seconds=abs(value.total_seconds())) if value.days < 0 else datetime.timedelta(),
idle_overtime_series)))
service_handle = axis.bar(indices + initial_width, service_series,
width,
bottom=time_delta_converter.zero,
color=color_map.colors[0 + color_offset])
travel_handle = axis.bar(indices + initial_width,
travel_series,
width,
bottom=service_series + time_delta_converter.zero_num,
color=color_map.colors[2 + color_offset])
idle_handle = axis.bar(indices + initial_width,
idle_series,
width,
bottom=service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[4 + color_offset])
overtime_handle = axis.bar(indices + initial_width,
overtime_series,
width,
bottom=idle_series + service_series + travel_series + time_delta_converter.zero_num,
color=color_map.colors[6 + color_offset])
return service_handle, travel_handle, idle_handle, overtime_handle
travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Candidate,
contrast_frame.Travel_Candidate,
contrast_frame.Service_Candidate,
axis,
__WIDTH)
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle \
= plot_activity_stacked_histogram(contrast_frame.Availability_Base,
contrast_frame.Travel_Base,
contrast_frame.Service_Base,
axis,
__WIDTH,
__WIDTH,
1)
axis.yaxis_date()
axis.yaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M:%S"))
axis.legend(
(travel_candidate_handle, service_candidate_handle, idle_candidate_handle, overtime_candidate_handle,
travel_base_handle, service_base_handle, idle_base_handle, overtime_base_handle),
('', '', '', '', 'Service', 'Travel', 'Idle', 'Overtime'), loc='best', ncol=2, columnspacing=0)
output_file = problem_file_name + '_contrast_activity_' + date.isoformat() + '.' + __FORMAT
bottom, top = axis.get_ylim()
axis.set_ylim(bottom, top + 0.025)
else:
raise ValueError('Unknown plot type {0}'.format(plot_type))
matplotlib.pyplot.subplots_adjust(left=0.125)
matplotlib.pyplot.savefig(output_file, format=__FORMAT, dpi=300)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def parse_time_delta(text):
if text:
time = datetime.datetime.strptime(text, '%H:%M:%S').time()
return datetime.timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
return None
class TraceLog:
    __STAGE_PATTERN = re.compile('^\w+(?P<number>\d+)(?:-Patch)?$')
__PENALTY_PATTERN = re.compile('^MissedVisitPenalty:\s+(?P<penalty>\d+)$')
__CARER_USED_PATTERN = re.compile('^CarerUsedPenalty:\s+(?P<penalty>\d+)$')
class ProgressMessage:
def __init__(self, **kwargs):
self.__branches = kwargs.get('branches', None)
self.__cost = kwargs.get('cost', None)
self.__dropped_visits = kwargs.get('dropped_visits', None)
self.__memory_usage = kwargs.get('memory_usage', None)
self.__solutions = kwargs.get('solutions', None)
self.__wall_time = parse_time_delta(kwargs.get('wall_time', None))
@property
def cost(self):
return self.__cost
@property
def solutions(self):
return self.__solutions
@property
def dropped_visits(self):
return self.__dropped_visits
class ProblemMessage:
def __init__(self, **kwargs):
self.__carers = kwargs.get('carers', None)
self.__visits = kwargs.get('visits', None)
self.__date = kwargs.get('date', None)
if self.__date:
self.__date = datetime.datetime.strptime(self.__date, '%Y-%b-%d').date()
self.__visit_time_windows = parse_time_delta(kwargs.get('visit_time_windows', None))
self.__break_time_windows = parse_time_delta(kwargs.get('break_time_windows', None))
self.__shift_adjustment = parse_time_delta(kwargs.get('shift_adjustment', None))
self.__area = kwargs.get('area', None)
self.__missed_visit_penalty = kwargs.get('missed_visit_penalty', None)
self.__carer_used_penalty = kwargs.get('carer_used_penalty', None)
@property
def date(self):
return self.__date
@property
def carers(self):
return self.__carers
@property
def visits(self):
return self.__visits
@property
def visit_time_window(self):
return self.__visit_time_windows
@property
def carer_used_penalty(self):
return self.__carer_used_penalty
@carer_used_penalty.setter
def carer_used_penalty(self, value):
self.__carer_used_penalty = value
@property
def missed_visit_penalty(self):
return self.__missed_visit_penalty
@missed_visit_penalty.setter
def missed_visit_penalty(self, value):
self.__missed_visit_penalty = value
@property
def shift_adjustment(self):
return self.__shift_adjustment
StageSummary = collections.namedtuple('StageSummary', ['duration', 'final_cost', 'final_dropped_visits'])
def __init__(self, time_point):
self.__start = time_point
self.__events = []
self.__current_stage = None
self.__current_strategy = None
self.__problem = TraceLog.ProblemMessage()
@staticmethod
def __parse_stage_number(body):
comment = body.get('comment', None)
if comment:
match = TraceLog.__STAGE_PATTERN.match(comment)
if match:
return int(match.group('number'))
return None
def append(self, time_point, body):
if 'branches' in body:
body_to_use = TraceLog.ProgressMessage(**body)
elif 'type' in body:
if body['type'] == 'started':
self.__current_stage = self.__parse_stage_number(body)
elif body['type'] == 'finished':
self.__current_stage = None
self.__current_strategy = None
elif body['type'] == 'unknown':
if 'comment' in body:
if 'MissedVisitPenalty' in body['comment']:
match = re.match(self.__PENALTY_PATTERN, body['comment'])
assert match is not None
missed_visit_penalty = int(match.group('penalty'))
self.__problem.missed_visit_penalty = missed_visit_penalty
elif 'CarerUsedPenalty' in body['comment']:
match = re.match(self.__CARER_USED_PATTERN, body['comment'])
assert match is not None
carer_used_penalty = int(match.group('penalty'))
self.__problem.carer_used_penalty = carer_used_penalty
body_to_use = body
elif 'area' in body:
body_to_use = TraceLog.ProblemMessage(**body)
if body_to_use.missed_visit_penalty is None and self.__problem.missed_visit_penalty is not None:
body_to_use.missed_visit_penalty = self.__problem.missed_visit_penalty
if body_to_use.carer_used_penalty is None and self.__problem.carer_used_penalty is not None:
body_to_use.carer_used_penalty = self.__problem.carer_used_penalty
self.__problem = body_to_use
else:
body_to_use = body
# quick fix to prevent negative computation time if the time frame crosses midnight
if self.__start < time_point:
computation_time = time_point - self.__start
else:
computation_time = time_point + datetime.timedelta(hours=24) - self.__start
self.__events.append([computation_time, self.__current_stage, self.__current_strategy, time_point, body_to_use])
def compute_stages(self) -> typing.List[StageSummary]:
groups = dict()
for delta, stage, topic, time, message in self.__events:
if isinstance(message, TraceLog.ProgressMessage):
if stage not in groups:
groups[stage] = []
groups[stage].append([delta, topic, message])
result = []
def create_stage_summary(group):
duration = group[-1][0] - group[0][0]
cost = group[-1][2].cost
dropped_visits = group[-1][2].dropped_visits
return TraceLog.StageSummary(duration=duration, final_cost=cost, final_dropped_visits=dropped_visits)
if len(groups) == 1:
result.append(create_stage_summary(groups[None]))
else:
for stage in range(1, max(filter(lambda s: s is not None, groups)) + 1):
result.append(create_stage_summary(groups[stage]))
return result
def has_stages(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if isinstance(event, TraceLog.ProblemMessage) or isinstance(event, TraceLog.ProgressMessage):
continue
if 'type' in event and event['type'] == 'started':
return True
return False
def best_cost(self, stage: int):
best_cost, _ = self.__best_cost_and_time(stage)
return best_cost
def best_cost_time(self, stage: int):
_, best_cost_time = self.__best_cost_and_time(stage)
return best_cost_time
def last_cost(self):
last_cost, _ = self.__last_cost_and_time()
return last_cost
def last_cost_time(self):
_, last_cost_time = self.__last_cost_and_time()
return last_cost_time
def computation_time(self):
computation_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__events:
computation_time = relative_time
return computation_time
def __best_cost_and_time(self, stage: int):
best_cost = float('inf')
best_time = datetime.timedelta.max
for relative_time, event_stage, strategy, absolute_time, event in self.__filtered_events():
if event_stage > stage:
continue
if best_cost > event.cost:
best_cost = event.cost
best_time = relative_time
return best_cost, best_time
def __last_cost_and_time(self):
last_cost = float('inf')
last_time = datetime.timedelta.max
for relative_time, stage, strategy, absolute_time, event in self.__filtered_events():
last_cost = event.cost
last_time = relative_time
return last_cost, last_time
def __filtered_events(self):
for relative_time, stage, strategy, absolute_time, event in self.__events:
if stage != 2 and stage != 3:
continue
if strategy == 'DELAY_RISKINESS_REDUCTION':
continue
if not isinstance(event, TraceLog.ProgressMessage):
continue
yield relative_time, stage, strategy, absolute_time, event
@property
def strategy(self):
return self.__current_strategy
@strategy.setter
def strategy(self, value):
self.__current_strategy = value
@property
def visits(self):
return self.__problem.visits
@property
def carers(self):
return self.__problem.carers
@property
def date(self):
return self.__problem.date
@property
def visit_time_window(self):
return self.__problem.visit_time_window
@property
def carer_used_penalty(self):
return self.__problem.carer_used_penalty
@property
def missed_visit_penalty(self):
return self.__problem.missed_visit_penalty
@property
def shift_adjustment(self):
return self.__problem.shift_adjustment
@property
def events(self):
return self.__events
def read_traces(trace_file) -> typing.List[TraceLog]:
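# Parse a solver log file into a list of TraceLog objects. Each line is expected to carry
# a timestamp followed by a JSON body; non-JSON lines are matched against known textual
# patterns and anything unrecognised is reported as a warning.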
log_line_pattern = re.compile(r'^\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?]\s+(?P<body>.*)$')
other_line_pattern = re.compile(r'^.*?\[\w+\s+(?P<time>\d+:\d+:\d+\.\d+).*?\]\s+(?P<body>.*)$')
strategy_line_pattern = re.compile(r'^Solving the (?P<stage_name>\w+) stage using (?P<strategy_name>\w+) strategy$')
loaded_visits_pattern = re.compile(r'^Loaded past visits in \d+ seconds$')
trace_logs = []
has_preambule = False
with open(trace_file, 'r') as input_stream:
current_log = None
for line in input_stream:
match = log_line_pattern.match(line)
if not match:
match = other_line_pattern.match(line)
if match:
raw_time = match.group('time')
time = datetime.datetime.strptime(raw_time, '%H:%M:%S.%f')
try:
raw_body = match.group('body')
body = json.loads(raw_body)
if 'comment' in body and (body['comment'] == 'All'
or 'MissedVisitPenalty' in body['comment']
or 'CarerUsedPenalty' in body['comment']):
if body['comment'] == 'All':
if 'type' in body:
if body['type'] == 'finished':
has_preambule = False
current_log.strategy = None
elif body['type'] == 'started':
has_preambule = True
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
elif 'area' in body and not has_preambule:
current_log = TraceLog(time)
current_log.append(time, body)
trace_logs.append(current_log)
else:
current_log.append(time, body)
except json.decoder.JSONDecodeError:
strategy_match = strategy_line_pattern.match(match.group('body'))
if strategy_match:
current_log.strategy = strategy_match.group('strategy_name')
continue
loaded_visits_match = loaded_visits_pattern.match(match.group('body'))
if loaded_visits_match:
continue
warnings.warn('Failed to parse line: ' + line)
elif 'GUIDED_LOCAL_SEARCH specified without sane timeout: solve may run forever.' in line:
continue
else:
warnings.warn('Failed to match line: ' + line)
return trace_logs
def traces_to_data_frame(trace_logs):
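# Flatten the progress events of every trace into a single pandas DataFrame. Either all
# traces use staged solving or none of them may; mixing the two raises a ValueError.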
columns = ['relative_time', 'cost', 'dropped_visits', 'solutions', 'stage', 'stage_started', 'date', 'carers',
'visits']
has_stages = [trace.has_stages() for trace in trace_logs]
if all(has_stages) != any(has_stages):
raise ValueError('Some traces have stages while others do not')
has_stages = all(has_stages)
data = []
if has_stages:
for trace in trace_logs:
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
if not current_stage_name:
continue
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
current_stage_name, current_stage_started,
trace.date, current_carers, current_visits])
elif 'type' in event:
if 'comment' in event and event['type'] == 'unknown':
continue
if event['type'] == 'finished':
current_carers = None
current_visits = None
current_stage_started = None
current_stage_name = None
continue
if event['type'] == 'started':
current_stage_started = rel_time
current_stage_name = event['comment']
else:
for trace in trace_logs:
current_carers = None
current_visits = None
for rel_time, stage, strategy, abs_time, event in trace.events:
if isinstance(event, TraceLog.ProblemMessage):
current_carers = event.carers
current_visits = event.visits
elif isinstance(event, TraceLog.ProgressMessage):
data.append([rel_time,
event.cost, event.dropped_visits, event.solutions,
None, None,
trace.date, current_carers, current_visits])
return pandas.DataFrame(data=data, columns=columns)
def parse_pandas_duration(value):
raw_hours, raw_minutes, raw_seconds = value.split(':')
return datetime.timedelta(hours=int(raw_hours), minutes=int(raw_minutes), seconds=int(raw_seconds))
class DateTimeFormatter:
def __init__(self, format):
self.__format = format
def __call__(self, x, pos=None):
if x < 0:
return None
x_to_use = x
if isinstance(x, numpy.int64):
x_to_use = x.item()
delta = datetime.timedelta(seconds=x_to_use)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime(self.__format)
class AxisSettings:
def __init__(self, minutes_per_step, format_pattern, units_label, right_xlimit, xticks):
self.__minutes_per_step = minutes_per_step
self.__format_pattern = format_pattern
self.__formatter = matplotlib.ticker.FuncFormatter(DateTimeFormatter(self.__format_pattern))
self.__units_label = units_label
self.__right_xlimit = right_xlimit
self.__xticks = xticks
@property
def formatter(self):
return self.__formatter
@property
def units_label(self):
return self.__units_label
@property
def right_xlimit(self):
return self.__right_xlimit
@property
def xticks(self):
return self.__xticks
@staticmethod
def infer(max_relative_time):
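# Pick tick spacing and a time format for the horizontal axis based on the longest
# observed computation time.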
if datetime.timedelta(minutes=30) < max_relative_time < datetime.timedelta(hours=1):
minutes_step = 10
format = '%H:%M'
units = '[hh:mm]'
elif datetime.timedelta(hours=1) <= max_relative_time:
minutes_step = 60
format = '%H:%M'
units = '[hh:mm]'
else:
assert max_relative_time <= datetime.timedelta(minutes=30)
minutes_step = 5
format = '%M:%S'
units = '[mm:ss]'
right_xlimit = (max_relative_time + datetime.timedelta(minutes=1)).total_seconds() // 60 * 60
xticks = numpy.arange(0, max_relative_time.total_seconds() + minutes_step * 60, minutes_step * 60)
return AxisSettings(minutes_step, format, units, right_xlimit, xticks)
def format_timedelta_pandas(x, pos=None):
if x < 0:
return None
time_delta = pandas.to_timedelta(x)
hours = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_HOUR)
minutes = int(time_delta.total_seconds() / matplotlib.dates.SEC_PER_MIN) - 60 * hours
return '{0:02d}:{1:02d}'.format(hours, minutes)
def format_time(x, pos=None):
if isinstance(x, numpy.int64):
x = x.item()
delta = datetime.timedelta(seconds=x)
time_point = datetime.datetime(2017, 1, 1) + delta
return time_point.strftime('%H:%M')
__SCATTER_POINT_SIZE = 1
__Y_AXIS_EXTENSION = 1.2
def add_trace_legend(axis, handles, bbox_to_anchor=(0.5, -0.23), ncol=3):
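# Build one legend entry per plotted schedule; rows of six elements describe single-stage
# runs, rows of seven elements describe multi-stage runs.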
first_row = handles[0]
def legend_single_stage(row):
handle, multi_visits, visits, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02} {3} {4}'.format(multi_visits,
visits,
carers,
cost_function,
date_time.strftime('%d-%m'))
def legend_multi_stage(row):
handle, multi_visits, visits, multi_carers, carers, cost_function, date = row
date_time = datetime.datetime.combine(date, datetime.time())
return 'V{0:02}/{1:03} C{2:02}/{3:02} {4} {5}' \
.format(multi_visits, visits, multi_carers, carers, cost_function, date_time.strftime('%d-%m'))
if len(first_row) == 6:
legend_formatter = legend_single_stage
elif len(first_row) == 7:
legend_formatter = legend_multi_stage
else:
raise ValueError('Expecting row of either 6 or 7 elements')
return rows.plot.add_legend(axis,
list(map(operator.itemgetter(0), handles)),
list(map(legend_formatter, handles)),
ncol,
bbox_to_anchor)
def scatter_cost(axis, data_frame, color):
return axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']], data_frame['cost'],
s=__SCATTER_POINT_SIZE,
c=color)
def scatter_dropped_visits(axis, data_frame, color):
axis.scatter(
[time_delta.total_seconds() for time_delta in data_frame['relative_time']],
data_frame['dropped_visits'],
s=__SCATTER_POINT_SIZE,
c=color)
def draw_avline(axis, point, color='lightgrey', linestyle='--'):
axis.axvline(point, color=color, linestyle=linestyle, linewidth=0.8, alpha=0.8)
def get_problem_stats(problem, date):
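# Return the number of visits requested on the given date and how many of them require
# more than one carer.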
problem_visits = [visit for carer_visits in problem.visits
for visit in carer_visits.visits if visit.date == date]
return len(problem_visits), len([visit for visit in problem_visits if visit.carer_count > 1])
def compare_trace(args, settings):
problem = rows.load.load_problem(get_or_raise(args, __PROBLEM_FILE_ARG))
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
trace_file = get_or_raise(args, __FILE_ARG)
trace_file_base_name = os.path.basename(trace_file)
trace_file_stem, trace_file_ext = os.path.splitext(trace_file_base_name)
output_file_stem = getattr(args, __OUTPUT, trace_file_stem)
trace_logs = read_traces(trace_file)
data_frame = traces_to_data_frame(trace_logs)
current_date = getattr(args, __DATE_ARG, None)
dates = data_frame['date'].unique()
if current_date and current_date not in dates:
raise ValueError('Date {0} is not present in the data set'.format(current_date))
color_numbers = [0, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 11, 13]
color_number_it = iter(color_numbers)
color_map = matplotlib.cm.get_cmap('tab20')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
max_relative_time = datetime.timedelta()
try:
if current_date:
current_color = color_map.colors[next(color_number_it)]
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
ax_settings = AxisSettings.infer(max_relative_time)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
handles = []
for stage in stages:
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
current_stage_data_frame = current_date_frame[current_date_frame['stage'] == stage]
draw_avline(ax1, time_delta.total_seconds())
draw_avline(ax2, time_delta.total_seconds())
total_stage_visits = current_stage_data_frame['visits'].iloc[0]
carers = current_stage_data_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_stage_data_frame, current_color)
scatter_dropped_visits(ax2, current_stage_data_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_stage_visits,
carers,
cost_function,
current_date])
ax2.set_xlim(left=0)
ax2.set_ylim(bottom=-10)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = ax1.scatter(
[time_delta.total_seconds() for time_delta in current_date_frame['relative_time']],
current_date_frame['cost'], s=1)
add_trace_legend(ax1, [[handle, total_multiple_carer_visits, total_problem_visits, carers, cost_function, current_date]])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
else:
handles = []
for current_date in dates:
current_color = color_map.colors[next(color_number_it)]
current_date_frame = data_frame[data_frame['date'] == current_date]
max_relative_time = max(current_date_frame['relative_time'].max(), max_relative_time)
total_problem_visits, total_multiple_carer_visits = get_problem_stats(problem, current_date)
stages = current_date_frame['stage'].unique()
if len(stages) > 1:
stage_linestyles = [None, 'dotted', 'dashed']
for stage, linestyle in zip(stages, stage_linestyles):
time_delta = current_date_frame[current_date_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), color=current_color, linestyle=linestyle)
total_carers = current_date_frame['carers'].max()
multi_carers = current_date_frame['carers'].min()
if multi_carers == total_carers:
multi_carers = 0
total_visits = current_date_frame['visits'].max()
multi_visits = current_date_frame['visits'].min()
if multi_visits == total_visits:
multi_visits = 0
handle = scatter_cost(ax1, current_date_frame, current_color)
scatter_dropped_visits(ax2, current_date_frame, current_color)
handles.append([handle,
multi_visits,
total_visits,
multi_carers,
total_carers,
cost_function,
current_date])
else:
total_visits = current_date_frame['visits'].iloc[0]
if total_visits != (total_problem_visits + total_multiple_carer_visits):
raise ValueError('Number of visits in problem and solution does not match: {0} vs {1}'
.format(total_visits, (total_problem_visits + total_multiple_carer_visits)))
carers = current_date_frame['carers'].iloc[0]
handle = scatter_cost(ax1, current_date_frame, current_color)
handles.append([handle,
total_multiple_carer_visits,
total_problem_visits,
carers,
cost_function,
current_date])
scatter_dropped_visits(ax2, current_date_frame, current_color)
ax_settings = AxisSettings.infer(max_relative_time)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
# if add_arrows:
# ax1.arrow(950, 200000, 40, -110000, head_width=10, head_length=20000, fc='k', ec='k')
# ax2.arrow(950, 60, 40, -40, head_width=10, head_length=10, fc='k', ec='k')
ax1_y_bottom, ax1_y_top = ax1.get_ylim()
ax1.set_ylim(bottom=0, top=ax1_y_top * __Y_AXIS_EXTENSION)
ax1.set_xlim(left=0, right=ax_settings.right_xlimit)
ax1.set_ylabel('Cost Function [s]')
ax2_y_bottom, ax2_y_top = ax2.get_ylim()
ax2.set_ylim(bottom=-10, top=ax2_y_top * __Y_AXIS_EXTENSION)
ax2.set_xlim(left=0, right=ax_settings.right_xlimit)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def get_schedule_stats(data_frame):
def get_stage_stats(stage):
if stage and (isinstance(stage, str) or (isinstance(stage, float) and not numpy.isnan(stage))):
stage_frame = data_frame[data_frame['stage'] == stage]
else:
stage_frame = data_frame[data_frame['stage'].isnull()]
min_carers, max_carers = stage_frame['carers'].min(), stage_frame['carers'].max()
if min_carers != max_carers:
raise ValueError(
'Numbers of carer differs within stage in range [{0}, {1}]'.format(min_carers, max_carers))
min_visits, max_visits = stage_frame['visits'].min(), stage_frame['visits'].max()
if min_visits != max_visits:
raise ValueError(
'Numbers of carer differs within stage in range [{0}, {1}]'.format(min_visits, max_visits))
return min_carers, min_visits
stages = data_frame['stage'].unique()
if len(stages) > 1:
data = []
for stage in stages:
carers, visits = get_stage_stats(stage)
data.append([stage, carers, visits])
return data
else:
stage_to_use = None
if len(stages) == 1:
stage_to_use = stages[0]
carers, visits = get_stage_stats(stage_to_use)
return [[None, carers, visits]]
def contrast_trace(args, settings):
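# Plot cost and declined visits over computation time for a base trace and a candidate
# trace of the same day on shared axes.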
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
output_file_stem = getattr(args, __OUTPUT, problem_file_name + '_contrast_traces')
cost_function = get_or_raise(args, __COST_FUNCTION_TYPE)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
candidate_trace_file = get_or_raise(args, __CANDIDATE_FILE_ARG)
base_frame = traces_to_data_frame(read_traces(base_trace_file))
candidate_frame = traces_to_data_frame(read_traces(candidate_trace_file))
current_date = get_or_raise(args, __DATE_ARG)
if current_date not in base_frame['date'].unique():
raise ValueError('Date {0} is not present in the base data set'.format(current_date))
if current_date not in candidate_frame['date'].unique():
raise ValueError('Date {0} is not present in the candidate data set'.format(current_date))
max_relative_time = datetime.timedelta()
max_relative_time = max(base_frame[base_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
max_relative_time = max(candidate_frame[candidate_frame['date'] == current_date]['relative_time'].max(), max_relative_time)
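# NOTE: the computed maximum is overridden below, presumably to focus both plots on the
# first 20 minutes of computation.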
max_relative_time = datetime.timedelta(minutes=20)
ax_settings = AxisSettings.infer(max_relative_time)
color_map = matplotlib.cm.get_cmap('Set1')
matplotlib.pyplot.set_cmap(color_map)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
def plot(data_frame, color):
stages = data_frame['stage'].unique()
if len(stages) > 1:
for stage, linestyle in zip(stages, [None, 'dotted', 'dashed']):
time_delta = data_frame[data_frame['stage'] == stage]['stage_started'].iloc[0]
draw_avline(ax1, time_delta.total_seconds(), linestyle=linestyle)
draw_avline(ax2, time_delta.total_seconds(), linestyle=linestyle)
scatter_dropped_visits(ax2, data_frame, color=color)
return scatter_cost(ax1, data_frame, color=color)
base_current_data_frame = base_frame[base_frame['date'] == current_date]
base_handle = plot(base_current_data_frame, color_map.colors[0])
base_stats = get_schedule_stats(base_current_data_frame)
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
candidate_handle = plot(candidate_current_data_frame, color_map.colors[1])
candidate_stats = get_schedule_stats(candidate_current_data_frame)
labels = []
for stages in [base_stats, candidate_stats]:
if len(stages) == 1:
labels.append('Direct')
elif len(stages) > 1:
labels.append('Multistage')
else:
raise ValueError()
ax1.set_ylim(bottom=0.0)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0.0, right=max_relative_time.total_seconds())
legend1 = ax1.legend([base_handle, candidate_handle], labels)
for handle in legend1.legendHandles:
handle._sizes = [25]
ax2.set_xlim(left=0.0, right=max_relative_time.total_seconds())
ax2.set_ylim(bottom=0.0)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax1.set_xticks(ax_settings.xticks)
ax2.set_xticks(ax_settings.xticks)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
legend2 = ax2.legend([base_handle, candidate_handle], labels)
for handle in legend2.legendHandles:
handle._sizes = [25]
figure.tight_layout()
matplotlib.pyplot.tight_layout()
rows.plot.save_figure(output_file_stem + '_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
figure, (ax1, ax2) = matplotlib.pyplot.subplots(2, 1, sharex=True)
try:
candidate_current_data_frame = candidate_frame[candidate_frame['date'] == current_date]
scatter_dropped_visits(ax2, candidate_current_data_frame, color=color_map.colors[1])
scatter_cost(ax1, candidate_current_data_frame, color=color_map.colors[1])
stage2_started = \
candidate_current_data_frame[candidate_current_data_frame['stage'] == 'Stage2']['stage_started'].iloc[0]
ax1.set_ylim(bottom=0, top=6 * 10 ** 4)
ax1.set_ylabel('Cost Function [s]')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2, 2))
ax1.xaxis.set_major_formatter(ax_settings.formatter)
ax1.set_xlim(left=0, right=12)
ax2.set_xlim(left=0, right=12)
x_ticks_positions = range(0, 12 + 1, 2)
# matplotlib.pyplot.locator_params(axis='x', nbins=6)
ax2.set_ylim(bottom=-10.0, top=120)
ax2.set_ylabel('Declined Visits')
ax2.set_xlabel('Computation Time ' + ax_settings.units_label)
ax2.set_xticks(x_ticks_positions)
ax2.xaxis.set_major_formatter(ax_settings.formatter)
matplotlib.pyplot.tight_layout()
# rows.plot.save_figure(output_file_stem + '_first_stage_' + current_date.isoformat())
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compare_box_plots(args, settings):
problem_file = get_or_raise(args, __PROBLEM_FILE_ARG)
problem = rows.load.load_problem(problem_file)
problem_file_base = os.path.basename(problem_file)
problem_file_name, problem_file_ext = os.path.splitext(problem_file_base)
base_trace_file = get_or_raise(args, __BASE_FILE_ARG)
output_file_stem = getattr(args, __OUTPUT, problem_file_name)
traces = read_traces(base_trace_file)
figure, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(1, 3)
stages = [trace.compute_stages() for trace in traces]
num_stages = max(len(s) for s in stages)
durations = [[getattr(local_stage[num_stage], 'duration').total_seconds() for local_stage in stages] for num_stage in range(num_stages)]
max_duration = max(max(stage_durations) for stage_durations in durations)
axis_settings = AxisSettings.infer(datetime.timedelta(seconds=max_duration))
try:
ax1.boxplot(durations, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax1.set_yticks(axis_settings.xticks)
ax1.yaxis.set_major_formatter(axis_settings.formatter)
ax1.set_xlabel('Stage')
ax1.set_ylabel('Duration [hh:mm]')
costs = [[getattr(local_stage[num_stage], 'final_cost') for local_stage in stages] for num_stage in range(num_stages)]
ax2.boxplot(costs, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
formatter = matplotlib.ticker.ScalarFormatter()
formatter.set_scientific(True)
formatter.set_powerlimits((-3, 3))
ax2.yaxis.set_major_formatter(formatter)
ax2.set_xlabel('Stage')
ax2.set_ylabel('Cost')
declined_visits = [[getattr(local_stage[num_stage], 'final_dropped_visits') for local_stage in stages] for num_stage in range(num_stages)]
ax3.boxplot(declined_visits, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
max_declined_visits = max(max(stage_visits) for stage_visits in declined_visits)
ax3.set_xlabel('Stage')
ax3.set_ylabel('Declined Visits')
dropped_visit_ticks = None
if max_declined_visits < 100:
dropped_visit_ticks = range(0, max_declined_visits + 1)
else:
dropped_visit_ticks = range(0, max_declined_visits + 100, 100)
ax3.set_yticks(dropped_visit_ticks)
figure.tight_layout()
rows.plot.save_figure(output_file_stem)
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def compare_prediction_error(args, settings):
base_schedule = rows.plot.load_schedule(get_or_raise(args, __BASE_FILE_ARG))
candidate_schedule = rows.plot.load_schedule(get_or_raise(args, __CANDIDATE_FILE_ARG))
observed_duration_by_visit = rows.plot.calculate_observed_visit_duration(base_schedule)
expected_duration_by_visit = calculate_expected_visit_duration(candidate_schedule)
data = []
for visit in base_schedule.visits:
observed_duration = observed_duration_by_visit[visit.visit]
expected_duration = expected_duration_by_visit[visit.visit]
data.append([visit.key, observed_duration.total_seconds(), expected_duration.total_seconds()])
frame = pandas.DataFrame(columns=['Visit', 'ObservedDuration', 'ExpectedDuration'], data=data)
frame['Error'] = (frame.ObservedDuration - frame.ExpectedDuration) / frame.ObservedDuration
figure, axis = matplotlib.pyplot.subplots()
try:
axis.plot(frame['Error'], label='(Observed - Expected)/Observed')
axis.legend()
axis.set_ylim(-20, 2)
axis.grid()
matplotlib.pyplot.show()
finally:
matplotlib.pyplot.cla()
matplotlib.pyplot.close(figure)
def remove_violated_visits(rough_schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
duration_estimator: rows.plot.DurationEstimator,
distance_estimator: rows.plot.DistanceEstimator) -> rows.model.schedule.Schedule:
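# Rebuild the schedule keeping only visits that respect the constraints implied by the trace
# metadata: visits whose check-in delay exceeds the time window are dropped first, then each
# route is simulated with estimated durations and travel times so that visits pushing the
# carer beyond the adjusted shift end, or routes without enough slack for contractual breaks,
# are trimmed.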
max_delay = metadata.visit_time_window
min_delay = -metadata.visit_time_window
dropped_visits = 0
allowed_visits = []
for route in rough_schedule.routes:
carer_diary = problem.get_diary(route.carer, metadata.date)
if not carer_diary:
continue
for visit in route.visits:
if visit.check_in is not None:
check_in_delay = visit.check_in - datetime.datetime.combine(metadata.date, visit.time)
if check_in_delay > max_delay: # or check_in_delay < min_delay:
dropped_visits += 1
continue
allowed_visits.append(visit)
# schedule does not have visits which exceed time windows
first_improved_schedule = rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
allowed_visits = []
for route in first_improved_schedule.routes:
if not route.visits:
continue
diary = problem.get_diary(route.carer, metadata.date)
assert diary is not None
# shift adjustment is added twice because it is allowed to extend the time before and after the working hours
max_shift_end = max(event.end for event in diary.events) + metadata.shift_adjustment + metadata.shift_adjustment
first_visit = route.visits[0]
current_time = datetime.datetime.combine(metadata.date, first_visit.time)
if current_time <= max_shift_end:
allowed_visits.append(first_visit)
visits_made = []
total_slack = datetime.timedelta()
if len(route.visits) == 1:
visit = route.visits[0]
visit_duration = duration_estimator(visit.visit)
if visit_duration is None:
visit_duration = visit.duration
current_time += visit_duration
if current_time <= max_shift_end:
visits_made.append(visit)
else:
dropped_visits += 1
else:
for prev_visit, next_visit in route.edges():
visit_duration = duration_estimator(prev_visit.visit)
if visit_duration is None:
visit_duration = prev_visit.duration
current_time += visit_duration
current_time += distance_estimator(prev_visit, next_visit)
start_time = max(current_time, datetime.datetime.combine(metadata.date, next_visit.time) - max_delay)
total_slack += start_time - current_time
current_time = start_time
if current_time <= max_shift_end:
visits_made.append(next_visit)
else:
dropped_visits += 1
if current_time <= max_shift_end:
total_slack += max_shift_end - current_time
total_break_duration = datetime.timedelta()
for carer_break in diary.breaks:
total_break_duration += carer_break.duration
if total_slack + datetime.timedelta(hours=2) < total_break_duration:
# route is not respecting contractual breaks
visits_made.pop()
for visit in visits_made:
allowed_visits.append(visit)
# schedule does not contain visits which exceed overtime of the carer
return rows.model.schedule.Schedule(carers=rough_schedule.carers, visits=allowed_visits)
class ScheduleCost:
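# Cost of a schedule decomposed into travel time, carers used and missed visits. CARER_COST
# is the fixed cost of engaging a carer, expressed as four hours of travel-time equivalent,
# and is counted only when include_vehicle_cost is requested.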
CARER_COST = datetime.timedelta(seconds=60 * 60 * 4)
def __init__(self, travel_time: datetime.timedelta, carers_used: int, visits_missed: int, missed_visit_penalty: int):
self.__travel_time = travel_time
self.__carers_used = carers_used
self.__visits_missed = visits_missed
self.__missed_visit_penalty = missed_visit_penalty
@property
def travel_time(self) -> datetime.timedelta:
return self.__travel_time
@property
def visits_missed(self) -> int:
return self.__visits_missed
@property
def missed_visit_penalty(self) -> int:
return self.__missed_visit_penalty
@property
def carers_used(self) -> int:
return self.__carers_used
def total_cost(self, include_vehicle_cost: bool) -> datetime.timedelta:
cost = self.__travel_time.total_seconds() + self.__missed_visit_penalty * self.__visits_missed
if include_vehicle_cost:
cost += self.CARER_COST.total_seconds() * self.__carers_used
return cost
def get_schedule_cost(schedule: rows.model.schedule.Schedule,
metadata: TraceLog,
problem: rows.model.problem.Problem,
distance_estimator: rows.plot.DistanceEstimator) -> ScheduleCost:
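# Compute total travel time, the number of carers used and the number of requested visits
# left unscheduled, using the missed-visit penalty recorded in the trace.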
carer_used_ids = set()
visit_made_ids = set()
travel_time = datetime.timedelta()
for route in schedule.routes:
if not route.visits:
continue
carer_used_ids.add(route.carer.sap_number)
for visit in route.visits:
visit_made_ids.add(visit.visit.key)
for source, destination in route.edges():
travel_time += distance_estimator(source, destination)
available_visit_ids = {visit.key for visit in problem.requested_visits(schedule.date)}
return ScheduleCost(travel_time, len(carer_used_ids), len(available_visit_ids.difference(visit_made_ids)), metadata.missed_visit_penalty)
def compare_schedule_cost(args, settings):
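# Compare the cost of human-planned schedules against the solver's second- and third-stage
# schedules for the first 14 days of October 2017 (paths are hard-coded); results are
# printed both as a plain table and as LaTeX.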
ProblemConfig = collections.namedtuple('ProblemConfig',
['ProblemPath', 'HumanSolutionPath', 'SolverSecondSolutionPath', 'SolverThirdSolutionPath'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/second_stage_c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
include_vehicle_cost = False
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
def normalize_cost(value) -> float:
if isinstance(value, datetime.timedelta):
value_to_use = value.total_seconds()
elif isinstance(value, float) or isinstance(value, int):
value_to_use = value
else:
return float('inf')
return round(value_to_use / 3600, 2)
for solver_trace, problem_config in list(zip(solver_traces, problem_data)):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
solver_second_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSecondSolutionPath))
solver_third_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverThirdSolutionPath))
assert solver_second_schedule.date == human_schedule.date
assert solver_third_schedule.date == human_schedule.date
available_carers = problem.available_carers(human_schedule.date)
requested_visits = problem.requested_visits(human_schedule.date)
one_carer_visits = [visit for visit in requested_visits if visit.carer_count == 1]
two_carer_visits = [visit for visit in requested_visits if visit.carer_count == 2]
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_third_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_second_schedule_to_use = remove_violated_visits(solver_second_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
solver_third_schedule_to_use = remove_violated_visits(solver_third_schedule, solver_trace, problem, duration_estimator,
distance_estimator)
human_cost = get_schedule_cost(human_schedule_to_use, solver_trace, problem, distance_estimator)
solver_second_cost = get_schedule_cost(solver_second_schedule_to_use, solver_trace, problem, distance_estimator)
solver_third_cost = get_schedule_cost(solver_third_schedule_to_use, solver_trace, problem, distance_estimator)
results.append(collections.OrderedDict(date=solver_trace.date,
day=solver_trace.date.day,
carers=len(available_carers),
one_carer_visits=len(one_carer_visits),
two_carer_visits=2 * len(two_carer_visits),
missed_visit_penalty=normalize_cost(solver_trace.missed_visit_penalty),
carer_used_penalty=normalize_cost(solver_trace.carer_used_penalty),
planner_missed_visits=human_cost.visits_missed,
solver_second_missed_visits=solver_second_cost.visits_missed,
solver_third_missed_visits=solver_third_cost.visits_missed,
planner_travel_time=normalize_cost(human_cost.travel_time),
solver_second_travel_time=normalize_cost(solver_second_cost.travel_time),
solver_third_travel_time=normalize_cost(solver_third_cost.travel_time),
planner_carers_used=human_cost.carers_used,
solver_second_carers_used=solver_second_cost.carers_used,
solver_third_carers_used=solver_third_cost.carers_used,
planner_total_cost=normalize_cost(human_cost.total_cost(include_vehicle_cost)),
solver_second_total_cost=normalize_cost(solver_second_cost.total_cost(include_vehicle_cost)),
solver_third_total_cost=normalize_cost(solver_third_cost.total_cost(include_vehicle_cost)),
solver_second_time=int(math.ceil(solver_trace.best_cost_time(2).total_seconds())),
solver_third_time=int(math.ceil(solver_trace.best_cost_time(3).total_seconds()))))
data_frame = pandas.DataFrame(data=results)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'carers', 'one_carer_visits', 'two_carer_visits', 'missed_visit_penalty',
'planner_total_cost', 'solver_second_total_cost', 'solver_third_total_cost',
'planner_missed_visits', 'solver_second_missed_visits', 'solver_third_missed_visits',
'planner_travel_time', 'solver_second_travel_time', 'solver_third_travel_time', 'solver_second_time',
'solver_third_time']],
tablefmt='latex', headers='keys', showindex=False))
def get_consecutive_visit_time_span(schedule: rows.model.schedule.Schedule, start_time_estimator):
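# For every client with at least two scheduled visits, accumulate the time elapsed between
# consecutive visit start times; duplicate visit keys and visits whose start time is exactly
# midnight are ignored.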
client_visits = collections.defaultdict(list)
for visit in schedule.visits:
client_visits[visit.visit.service_user].append(visit)
for client in client_visits:
visits = client_visits[client]
used_keys = set()
unique_visits = []
for visit in visits:
date_time = start_time_estimator(visit)
if date_time.hour == 0 and date_time.minute == 0:
continue
if visit.visit.key not in used_keys:
used_keys.add(visit.visit.key)
unique_visits.append(visit)
unique_visits.sort(key=start_time_estimator)
client_visits[client] = unique_visits
client_span = collections.defaultdict(datetime.timedelta)
for client in client_visits:
if len(client_visits[client]) < 2:
continue
last_visit = client_visits[client][0]
total_span = datetime.timedelta()
for next_visit in client_visits[client][1:]:
total_span += start_time_estimator(next_visit) - start_time_estimator(last_visit)
last_visit = next_visit
client_span[client] = total_span
return client_span
def get_carer_client_frequency(schedule: rows.model.schedule.Schedule):
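# Count, for each client, how many times each carer visited them during the day.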
client_assigned_carers = collections.defaultdict(collections.Counter)
for visit in schedule.visits:
client_assigned_carers[int(visit.visit.service_user)][int(visit.carer.sap_number)] += 1
return client_assigned_carers
def get_visits(problem: rows.model.problem.Problem, date: datetime.date):
visits = set()
for local_visits in problem.visits:
for visit in local_visits.visits:
if date != visit.date:
continue
visit.service_user = local_visits.service_user
visits.add(visit)
return visits
def get_teams(problem: rows.model.problem.Problem, schedule: rows.model.schedule.Schedule):
multiple_carer_visit_keys = set()
for visit in get_visits(problem, schedule.date):
if visit.carer_count > 1:
multiple_carer_visit_keys.add(visit.key)
client_visit_carers = collections.defaultdict(lambda: collections.defaultdict(list))
for visit in schedule.visits:
if visit.visit.key not in multiple_carer_visit_keys:
continue
client_visit_carers[visit.visit.service_user][visit.visit.key].append(int(visit.carer.sap_number))
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
client_visit_carers[client][visit_key].sort()
teams = set()
for client in client_visit_carers:
for visit_key in client_visit_carers[client]:
teams.add(tuple(client_visit_carers[client][visit_key]))
return teams
def compare_schedule_quality(args, settings):
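# Contrast qualitative metrics (overtime, visits per carer, continuity of care, visit span
# and team composition) between human-planned and solver schedules for a hard-coded series
# of problem instances.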
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'HumanSolutionPath', 'SolverSolutionPath'])
def compare_quality(solver_trace, problem, human_schedule, solver_schedule, duration_estimator, distance_estimator):
visits = get_visits(problem, solver_trace.date)
multiple_carer_visit_keys = {visit.key for visit in visits if visit.carer_count > 1}
clients = list({int(visit.service_user) for visit in visits})
# number of different carers assigned throughout the day
human_carer_frequency = get_carer_client_frequency(human_schedule)
solver_carer_frequency = get_carer_client_frequency(solver_schedule)
def median_carer_frequency(client_counters):
total_counters = []
for client in client_counters:
# total_counters += len(client_counters[client])
total_counters.append(len(client_counters[client]))
# return total_counters / len(client_counters)
return numpy.median(total_counters)
human_schedule_squared = []
solver_schedule_squared = []
for client in clients:
if client in human_carer_frequency:
human_schedule_squared.append(sum(human_carer_frequency[client][carer] ** 2 for carer in human_carer_frequency[client]))
else:
human_schedule_squared.append(0)
if client in solver_carer_frequency:
solver_schedule_squared.append(sum(solver_carer_frequency[client][carer] ** 2 for carer in solver_carer_frequency[client]))
else:
solver_schedule_squared.append(0)
human_matching_dominates = 0
solver_matching_dominates = 0
for index in range(len(clients)):
if human_schedule_squared[index] > solver_schedule_squared[index]:
human_matching_dominates += 1
elif human_schedule_squared[index] < solver_schedule_squared[index]:
solver_matching_dominates += 1
matching_no_diff = len(clients) - human_matching_dominates - solver_matching_dominates
assert matching_no_diff >= 0
human_schedule_span = get_consecutive_visit_time_span(human_schedule, lambda visit: visit.check_in)
solver_schedule_span = get_consecutive_visit_time_span(solver_schedule, lambda visit: datetime.datetime.combine(visit.date, visit.time))
human_span_dominates = 0
solver_span_dominates = 0
for client in clients:
if human_schedule_span[client] > solver_schedule_span[client]:
human_span_dominates += 1
elif human_schedule_span[client] < solver_schedule_span[client]:
solver_span_dominates += 1
span_no_diff = len(clients) - human_span_dominates - solver_span_dominates
assert span_no_diff >= 0
human_teams = get_teams(problem, human_schedule)
solver_teams = get_teams(problem, solver_schedule)
human_schedule_frame = rows.plot.get_schedule_data_frame(human_schedule, problem, duration_estimator, distance_estimator)
solver_schedule_frame = rows.plot.get_schedule_data_frame(solver_schedule, problem, duration_estimator, distance_estimator)
human_visits = human_schedule_frame['Visits'].median()
solver_visits = solver_schedule_frame['Visits'].median()
human_total_overtime = compute_overtime(human_schedule_frame).sum()
solver_total_overtime = compute_overtime(solver_schedule_frame).sum()
return {'problem': str(human_schedule.date),
'visits': len(visits),
'clients': len(clients),
'human_overtime': human_total_overtime,
'solver_overtime': solver_total_overtime,
'human_visits_median': human_visits,
'solver_visits_median': solver_visits,
'human_visit_span_dominates': human_span_dominates,
'solver_visit_span_dominates': solver_span_dominates,
'visit_span_indifferent': span_no_diff,
'human_matching_dominates': human_matching_dominates,
'solver_matching_dominates': solver_matching_dominates,
'human_carer_frequency': median_carer_frequency(human_carer_frequency),
'solver_carer_frequency': median_carer_frequency(solver_carer_frequency),
'matching_indifferent': matching_no_diff,
'human_teams': len(human_teams),
'solver_teams': len(solver_teams)}
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
solver_log_file = os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5.err.log')
problem_data = [ProblemConfig(os.path.join(simulation_dir, 'problems/C350_past.json'),
os.path.join(simulation_dir, 'planner_schedules/C350_planners_201710{0:02d}.json'.format(day)),
os.path.join(simulation_dir, 'solutions/c350past_distv90b90e30m1m1m5_201710{0:02d}.gexf'.format(day)))
for day in range(1, 15, 1)]
solver_traces = read_traces(solver_log_file)
assert len(solver_traces) == len(problem_data)
results = []
with rows.plot.create_routing_session() as routing_session:
distance_estimator = rows.plot.DistanceEstimator(settings, routing_session)
for solver_trace, problem_config in zip(solver_traces, problem_data):
problem = rows.load.load_problem(os.path.join(simulation_dir, problem_config.ProblemPath))
human_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.HumanSolutionPath))
solver_schedule = rows.load.load_schedule(os.path.join(simulation_dir, problem_config.SolverSolutionPath))
assert solver_trace.date == human_schedule.date
assert solver_trace.date == solver_schedule.date
duration_estimator = rows.plot.DurationEstimator.create_expected_visit_duration(solver_schedule)
human_schedule_to_use = remove_violated_visits(human_schedule, solver_trace, problem, duration_estimator, distance_estimator)
solver_schedule_to_use = remove_violated_visits(solver_schedule, solver_trace, problem, duration_estimator, distance_estimator)
row = compare_quality(solver_trace, problem, human_schedule_to_use, solver_schedule_to_use, duration_estimator, distance_estimator)
results.append(row)
data_frame = pandas.DataFrame(data=results)
data_frame['human_visit_span_dominates_rel'] = data_frame['human_visit_span_dominates'] / data_frame['clients']
data_frame['human_visit_span_dominates_rel_label'] = data_frame['human_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_visit_span_dominates_rel'] = data_frame['solver_visit_span_dominates'] / data_frame['clients']
data_frame['solver_visit_span_dominates_rel_label'] = data_frame['solver_visit_span_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['visit_span_indifferent_rel'] = data_frame['visit_span_indifferent'] / data_frame['clients']
data_frame['human_matching_dominates_rel'] = data_frame['human_matching_dominates'] / data_frame['clients']
data_frame['human_matching_dominates_rel_label'] = data_frame['human_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['solver_matching_dominates_rel'] = data_frame['solver_matching_dominates'] / data_frame['clients']
data_frame['solver_matching_dominates_rel_label'] = data_frame['solver_matching_dominates_rel'].apply(lambda v: '{0:.2f}'.format(v * 100.0))
data_frame['matching_indifferent_rel'] = data_frame['matching_indifferent'] / data_frame['clients']
data_frame['day'] = data_frame['problem'].apply(lambda label: datetime.datetime.strptime(label, '%Y-%m-%d').date().day)
data_frame['human_overtime_label'] = data_frame['human_overtime'].apply(get_time_delta_label)
data_frame['solver_overtime_label'] = data_frame['solver_overtime'].apply(get_time_delta_label)
print(tabulate.tabulate(data_frame, tablefmt='psql', headers='keys'))
print(tabulate.tabulate(data_frame[['day', 'human_visits_median', 'solver_visits_median', 'human_overtime_label', 'solver_overtime_label',
'human_carer_frequency', 'solver_carer_frequency',
'human_matching_dominates_rel_label', 'solver_matching_dominates_rel_label',
'human_teams', 'solver_teams']], tablefmt='latex', showindex=False, headers='keys'))
BenchmarkData = collections.namedtuple('BenchmarkData', ['BestCost', 'BestCostTime', 'BestBound', 'ComputationTime'])
class MipTrace:
__MIP_HEADER_PATTERN = re.compile('^\s*Expl\s+Unexpl\s+|\s+Obj\s+Depth\s+IntInf\s+|\s+Incumbent\s+BestBd\s+Gap\s+|\s+It/Node\s+Time\s*$')
__MIP_LINE_PATTERN = re.compile('^(?P<solution_flag>[\w\*]?)\s*'
'(?P<explored_nodes>\d+)\s+'
'(?P<nodes_to_explore>\d+)\s+'
'(?P<node_relaxation>[\w\.]*)\s+'
'(?P<node_depth>\d*)\s+'
'(?P<fractional_variables>\w*)\s+'
'(?P<incumbent>[\d\.\-]*)\s+'
'(?P<lower_bound>[\d\.\-]*)\s+'
'(?P<gap>[\d\.\%\-]*)\s+'
'(?P<simplex_it_per_node>[\d\.\-]*)\s+'
'(?P<elapsed_time>\d+)s$')
__SUMMARY_PATTERN = re.compile('^Best\sobjective\s(?P<objective>[e\d\.\+]+),\s'
'best\sbound\s(?P<bound>[e\d\.\+]+),\s'
'gap\s(?P<gap>[e\d\.\+]+)\%$')
class MipProgressMessage:
def __init__(self, has_solution, best_cost, lower_bound, elapsed_time):
self.__has_solution = has_solution
self.__best_cost = best_cost
self.__lower_bound = lower_bound
self.__elapsed_time = elapsed_time
@property
def has_solution(self):
return self.__has_solution
@property
def best_cost(self):
return self.__best_cost
@property
def lower_bound(self):
return self.__lower_bound
@property
def elapsed_time(self):
return self.__elapsed_time
def __init__(self, best_objective: float, best_bound: float, events: typing.List[MipProgressMessage]):
self.__best_objective = best_objective
self.__best_bound = best_bound
self.__events = events
@staticmethod
def read_from_file(path) -> 'MipTrace':
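# Parse a MIP solver log (the patterns appear to follow the Gurobi layout): skip to the
# progress table header, read incumbent/bound rows, then pick up the closing summary line
# with the best objective and bound.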
events = []
best_objective = float('inf')
best_bound = float('-inf')
with open(path, 'r') as fp:
lines = fp.readlines()
lines_it = iter(lines)
for line in lines_it:
if re.match(MipTrace.__MIP_HEADER_PATTERN, line):
break
next(lines_it, None) # read the empty line
for line in lines_it:
line_match = re.match(MipTrace.__MIP_LINE_PATTERN, line)
if not line_match:
break
raw_solution_flag = line_match.group('solution_flag')
raw_incumbent = line_match.group('incumbent')
raw_lower_bound = line_match.group('lower_bound')
raw_elapsed_time = line_match.group('elapsed_time')
has_solution = raw_solution_flag == 'H' or raw_solution_flag == '*'
incumbent = float(raw_incumbent) if raw_incumbent and raw_incumbent != '-' else float('inf')
lower_bound = float(raw_lower_bound) if raw_lower_bound else float('-inf')
elapsed_time = datetime.timedelta(seconds=int(raw_elapsed_time)) if raw_elapsed_time else datetime.timedelta()
events.append(MipTrace.MipProgressMessage(has_solution, incumbent, lower_bound, elapsed_time))
next(lines_it, None)
for line in lines_it:
line_match = re.match(MipTrace.__SUMMARY_PATTERN, line)
if line_match:
raw_objective = line_match.group('objective')
if raw_objective:
best_objective = float(raw_objective)
raw_bound = line_match.group('bound')
if raw_bound:
best_bound = float(raw_bound)
return MipTrace(best_objective, best_bound, events)
def best_cost(self):
return self.__best_objective
def best_cost_time(self):
for event in reversed(self.__events):
if event.has_solution:
return event.elapsed_time
return datetime.timedelta.max
def best_bound(self):
return self.__best_bound
def computation_time(self):
if self.__events:
return self.__events[-1].elapsed_time
return datetime.timedelta.max
class DummyTrace:
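# Placeholder used when a log file is missing or empty; it reports an infinite cost, a zero
# bound and a near-full-day solution time.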
def __init__(self):
pass
def best_cost(self):
return float('inf')
def best_bound(self):
return 0
def best_cost_time(self):
return datetime.timedelta(hours=23, minutes=59, seconds=59)
def compare_benchmark_table(args, settings):
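# Build comparison tables (printed as LaTeX) of the MIP solver against the CP solver with
# team and time-window formulations on the benchmark instances; paths to problems and logs
# are hard-coded.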
ProblemConfig = collections.namedtuple('ProblemConfig', ['ProblemPath', 'Carers', 'Visits', 'Visits2', 'MipSolutionLog',
'CpTeamSolutionLog',
'CpWindowsSolutionLog'])
simulation_dir = '/home/pmateusz/dev/cordia/simulations/current_review_simulations'
old_simulation_dir = '/home/pmateusz/dev/cordia/simulations/review_simulations_old'
dummy_log = DummyTrace()
problem_configs = [ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m0c3.json'.format(day_number)),
3, 25, 0,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m0c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)]
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/25/problem_201710{0:02d}_v25m5c3.json'.format(day_number)),
3, 20, 5,
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_v25m5c3_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_teams_v25m5c3.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/25/solutions/problem_201710{0:02d}_windows_v25m5c3.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m0c5.json'.format(day_number)),
5, 50, 0,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m0c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
problem_configs.extend(
[ProblemConfig(os.path.join(simulation_dir, 'benchmark/50/problem_201710{0:02d}_v50m10c5.json'.format(day_number)),
5, 40, 10,
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_v50m10c5_mip.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_teams_v50m10c5.err.log'.format(day_number)),
os.path.join(simulation_dir, 'benchmark/50/solutions/problem_201710{0:02d}_windows_v50m10c5.err.log'.format(day_number)))
for day_number in range(1, 15, 1)])
logs = []
for problem_config in problem_configs:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if os.path.exists(problem_config.CpTeamSolutionLog):
cp_team_logs = read_traces(problem_config.CpTeamSolutionLog)
if not cp_team_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpTeamSolutionLog))
cp_team_log = dummy_log
else:
cp_team_log = cp_team_logs[0]
else:
cp_team_log = dummy_log
if os.path.exists(problem_config.CpWindowsSolutionLog):
cp_window_logs = read_traces(problem_config.CpWindowsSolutionLog)
if not cp_window_logs:
warnings.warn('File {0} is empty'.format(problem_config.CpWindowsSolutionLog))
cp_window_log = dummy_log
else:
cp_window_log = cp_window_logs[0]
else:
cp_window_log = dummy_log
if os.path.exists(problem_config.MipSolutionLog):
mip_log = MipTrace.read_from_file(problem_config.MipSolutionLog)
if not mip_log:
warnings.warn('File {0} is empty'.format(problem_config.MipSolutionLog))
mip_log = dummy_log
else:
mip_log = dummy_log
logs.append([problem_config, mip_log, cp_team_log, cp_window_log])
def get_gap(cost: float, lower_bound: float) -> float:
if lower_bound == 0.0:
return float('inf')
return (cost - lower_bound) * 100.0 / lower_bound
def get_delta(cost, cost_to_compare):
return (cost - cost_to_compare) * 100.0 / cost_to_compare
def get_computation_time_label(time: datetime.timedelta) -> str:
return str(time.total_seconds())
data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
data.append(collections.OrderedDict(
date=cp_team_log.date,
visits=problem_config.Visits,
visits_of_two=problem_config.Visits2,
carers=cp_team_log.carers,
penalty=cp_team_log.missed_visit_penalty,
lower_bound=mip_log.best_bound(),
mip_best_cost=mip_log.best_cost(),
mip_best_gap=get_gap(mip_log.best_cost(), mip_log.best_bound()),
mip_best_time=get_computation_time_label(mip_log.best_cost_time()),
team_best_cost=cp_team_log.best_cost(),
team_best_gap=get_gap(cp_team_log.best_cost(), mip_log.best_bound()),
team_best_delta=get_gap(cp_team_log.best_cost(), mip_log.best_cost()),
team_best_time=get_computation_time_label(cp_team_log.best_cost_time()),
windows_best_cost=cp_window_log.best_cost(),
windows_best_gap=get_gap(cp_window_log.best_cost(), mip_log.best_bound()),
windows_best_delta=get_gap(cp_window_log.best_cost(), mip_log.best_cost()),
windows_best_time=get_computation_time_label(cp_window_log.best_cost_time())))
data_frame = pandas.DataFrame(data=data)
def get_duration_label(time_delta: datetime.timedelta) -> str:
assert time_delta.days == 0
hours = int(time_delta.total_seconds() / 3600)
minutes = int(time_delta.total_seconds() / 60 - hours * 60)
seconds = int(time_delta.total_seconds() - 3600 * hours - 60 * minutes)
# return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
return '{0:,.0f}'.format(time_delta.total_seconds())
def get_cost_label(cost: float) -> str:
return '{0:,.0f}'.format(cost)
def get_gap_label(gap: float) -> str:
return '{0:,.2f}'.format(gap)
def get_problem_label(problem, date: datetime.date):
label = '{0:2d} {1}'.format(date.day, problem.Visits)
if problem.Visits2 == 0:
return label
return label + '/' + str(problem.Visits2)
print_data = []
for problem_config, mip_log, cp_team_log, cp_window_log in logs:
best_cost = min([mip_log.best_cost(), cp_team_log.best_cost(), cp_window_log.best_cost()])
print_data.append(collections.OrderedDict(Problem=get_problem_label(problem_config, cp_team_log.date),
Penalty=get_cost_label(cp_team_log.missed_visit_penalty),
LB=get_cost_label(mip_log.best_bound()),
MIP_COST=get_cost_label(mip_log.best_cost()),
MIP_GAP=get_gap_label(get_gap(mip_log.best_cost(), mip_log.best_bound())),
MIP_DELTA=get_gap_label(get_delta(mip_log.best_cost(), best_cost)),
MIP_TIME=get_duration_label(mip_log.best_cost_time()),
TEAMS_GAP=get_gap_label(get_gap(cp_team_log.best_cost(), mip_log.best_bound())),
TEAMS_DELTA=get_gap_label(get_delta(cp_team_log.best_cost(), best_cost)),
TEAMS_COST=get_cost_label(cp_team_log.best_cost()),
TEAMS_Time=get_duration_label(cp_team_log.best_cost_time()),
WINDOWS_COST=get_cost_label(cp_window_log.best_cost()),
WINDOWS_GAP=get_gap_label(get_gap(cp_window_log.best_cost(), mip_log.best_bound())),
WINDOWS_DELTA=get_gap_label(get_delta(cp_window_log.best_cost(), best_cost)),
WINDOWS_TIME=get_duration_label(cp_window_log.best_cost_time())
))
data_frame = pandas.DataFrame(data=print_data)
print(tabulate.tabulate(
data_frame[['Problem', 'Penalty', 'LB', 'MIP_COST', 'MIP_TIME', 'TEAMS_COST', 'TEAMS_Time', 'WINDOWS_COST', 'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
print(tabulate.tabulate(
data_frame[['Problem', 'MIP_GAP', 'MIP_DELTA', 'MIP_TIME', 'TEAMS_GAP', 'TEAMS_DELTA', 'TEAMS_Time', 'WINDOWS_GAP', 'WINDOWS_DELTA',
'WINDOWS_TIME']],
tablefmt='latex', headers='keys', showindex=False))
@functools.total_ordering
class ProblemMetadata:
WINDOW_LABELS = ['', 'F', 'S', 'M', 'L', 'A']
def __init__(self, case: int, visits: int, windows: int):
assert visits == 20 or visits == 50 or visits == 80
assert 0 <= windows < len(ProblemMetadata.WINDOW_LABELS)
self.__case = case
self.__visits = visits
self.__windows = windows
def __eq__(self, other) -> bool:
if isinstance(other, ProblemMetadata):
return self.case == other.case and self.visits == other.visits and self.__windows == other.windows
return False
def __neq__(self, other) -> bool:
return not (self == other)
def __lt__(self, other) -> bool:
assert isinstance(other, ProblemMetadata)
if self.windows != other.windows:
return self.windows < other.windows
if self.visits != other.visits:
return self.visits < other.visits
if self.case != other.case:
return self.case < other.case
return False
@property
def label(self) -> str:
return '{0:>2}{1}'.format(self.instance_number, self.windows_label)
@property
def windows(self) -> int:
return self.__windows
@property
def windows_label(self) -> str:
return ProblemMetadata.WINDOW_LABELS[self.__windows]
@property
def visits(self) -> int:
return self.__visits
@property
def case(self) -> int:
return self.__case
@property
def instance_number(self) -> int:
if self.__visits == 20:
return self.__case
if self.__visits == 50:
return 5 + self.__case
return 8 + self.__case
def compare_literature_table(args, settings):
LIU2019 = 'liu2019'
AFIFI2016 = 'afifi2016'
DECERLE2018 = 'decerle2018'
GAYRAUD2015 = 'gayraud2015'
PARRAGH2018 = 'parragh2018'
BREDSTROM2008 = 'bredstrom2008combined'
BREDSTROM2007 = 'bredstrom2007branchandprice'
InstanceConfig = collections.namedtuple('InstanceConfig', ['name', 'nickname', 'result', 'who', 'is_optimal'])
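# Best results reported in the literature for each benchmark instance, together with the
# reference that obtained them and whether the value is proven optimal.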
instance_data = [
InstanceConfig(name='case_1_20_4_2_1', nickname='1N', result=5.13, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_1', nickname='2N', result=4.98, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_1', nickname='3N', result=5.19, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_1', nickname='4N', result=7.21, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_1', nickname='5N', result=5.37, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_1', nickname='6N', result=14.45, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_1', nickname='7N', result=13.02, who=DECERLE2018, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_1', nickname='8N', result=34.94, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_1', nickname='9N', result=43.48, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_2_80_16_8_1', nickname='10N', result=12.08, who=PARRAGH2018, is_optimal=True),
InstanceConfig(name='case_1_20_4_2_2', nickname='1S', result=3.55, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_2', nickname='2S', result=4.27, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_2', nickname='3S', result=3.63, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_2', nickname='4S', result=6.14, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_2', nickname='5S', result=3.93, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_2', nickname='6S', result=8.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_2', nickname='7S', result=8.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_50_10_5_2', nickname='8S', result=9.54, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_2', nickname='9S', result=11.93, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_2', nickname='10S', result=8.54, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_3', nickname='1M', result=3.55, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_3', nickname='2M', result=3.58, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_3', nickname='3M', result=3.33, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_3', nickname='4M', result=5.67, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_3', nickname='5M', result=3.53, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_3', nickname='6M', result=7.7, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_3', nickname='7M', result=7.48, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_3', nickname='8M', result=8.54, who=BREDSTROM2008, is_optimal=True),
InstanceConfig(name='case_1_80_16_8_3', nickname='9M', result=10.92, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_3', nickname='10M', result=7.62, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_4', nickname='1L', result=3.39, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_20_4_2_4', nickname='2L', result=3.42, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_3_20_4_2_4', nickname='3L', result=3.29, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_4_20_4_2_4', nickname='4L', result=5.13, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_5_20_4_2_4', nickname='5L', result=3.34, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_1_50_10_5_4', nickname='6L', result=7.14, who=BREDSTROM2007, is_optimal=True),
InstanceConfig(name='case_2_50_10_5_4', nickname='7L', result=6.88, who=BREDSTROM2007, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_4', nickname='8L', result=8, who=AFIFI2016, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_4', nickname='9L', result=10.43, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_4', nickname='10L', result=7.36, who=LIU2019, is_optimal=False),
InstanceConfig(name='case_1_20_4_2_5', nickname='1H', result=2.95, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_20_4_2_5', nickname='2H', result=2.88, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_20_4_2_5', nickname='3H', result=2.74, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_4_20_4_2_5', nickname='4H', result=4.29, who=GAYRAUD2015, is_optimal=False),
InstanceConfig(name='case_5_20_4_2_5', nickname='5H', result=2.81, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_50_10_5_5', nickname='6H', result=6.48, who=DECERLE2018, is_optimal=False),
InstanceConfig(name='case_2_50_10_5_5', nickname='7H', result=5.71, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_3_50_10_5_5', nickname='8H', result=6.52, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_1_80_16_8_5', nickname='9H', result=8.51, who=PARRAGH2018, is_optimal=False),
InstanceConfig(name='case_2_80_16_8_5', nickname='10H', result=6.31, who=PARRAGH2018, is_optimal=False)
]
instance_dirs = ['/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case20',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case50',
'/home/pmateusz/dev/cordia/simulations/current_review_simulations/hc/solutions/case80']
instance_dict = {instance.name: instance for instance in instance_data}
print_data = []
instance_pattern = re.compile(r'case_(?P<case>\d+)_(?P<visits>\d+)_(?P<carers>\d+)_(?P<synchronized_visits>\d+)_(?P<windows>\d+)')
instance_counter = 1
last_visits = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
for instance_dir in instance_dirs:
for instance in instance_data:
instance_log_path = os.path.join(instance_dir, instance.name + '.dat.err.log')
if not os.path.exists(instance_log_path):
continue
solver_logs = read_traces(instance_log_path)
if not solver_logs:
continue
instance = instance_dict[instance.name]
name_match = instance_pattern.match(instance.name)
if not name_match:
continue
first_solver_logs = solver_logs[0]
case = int(name_match.group('case'))
visits = int(name_match.group('visits'))
carers = int(name_match.group('carers'))
synchronized_visits = int(name_match.group('synchronized_visits'))
windows_configuration = int(name_match.group('windows'))
problem_meta = ProblemMetadata(case, visits, windows_configuration)
if last_visits and last_visits != visits:
instance_counter = 1
normalized_result = float('inf')
if first_solver_logs.best_cost(3) < 100:
normalized_result = round(first_solver_logs.best_cost(3), 2)
delta = round((instance.result - normalized_result) / instance.result * 100, 2)
printable_literature_result = str(instance.result)
if instance.is_optimal:
printable_literature_result += '*'
                printable_literature_result += '\\cite{{{0}}}'.format(instance.who)
print_data.append(collections.OrderedDict(
metadata=problem_meta,
problem=problem_meta.label,
case=instance_counter,
v1=visits - 2 * synchronized_visits,
v2=synchronized_visits,
carers=carers,
time_windows=problem_meta.windows_label,
literature_result=printable_literature_result,
result=normalized_result,
delta=delta,
time=round(first_solver_logs.best_cost_time(3).total_seconds(), 2) if normalized_result != float('inf') else float('inf')
))
last_visits = visits
instance_counter += 1
print_data.sort(key=lambda dict_obj: dict_obj['metadata'])
print(tabulate.tabulate(
pandas.DataFrame(data=print_data)[['problem', 'carers', 'v1', 'v2', 'literature_result', 'result', 'time', 'delta']],
showindex=False,
tablefmt='latex', headers='keys'))
def compare_planner_optimizer_quality(args, settings):
data_file = getattr(args, __FILE_ARG)
data_frame = pandas.read_csv(data_file)
figsize = (2.5, 5)
labels = ['Planners', 'Algorithm']
data_frame['travel_time'] = data_frame['Travel Time'].apply(parse_pandas_duration)
data_frame['span'] = data_frame['Span'].apply(parse_pandas_duration)
data_frame['overtime'] = data_frame['Overtime'].apply(parse_pandas_duration)
data_frame_planners = data_frame[data_frame['Type'] == 'Planners']
data_frame_solver = data_frame[data_frame['Type'] == 'Solver']
overtime_per_carer = [list((data_frame_planners['overtime'] / data_frame_planners['Carers']).values),
list((data_frame_solver['overtime'] / data_frame_solver['Carers']).values)]
def to_matplotlib_minutes(value):
return value * 60 * 1000000000
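    # The duration columns are pandas timedeltas, which matplotlib treats as
    # nanosecond counts, so minutes are scaled by 60 * 1e9 to position the
    # y-ticks used with the HH:MM formatter below.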
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(overtime_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Overtime per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(10), to_matplotlib_minutes(20), to_matplotlib_minutes(30)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_overtime')
travel_time_per_carer = [list((data_frame_planners['travel_time'] / data_frame_planners['Carers']).values),
list((data_frame_solver['travel_time'] / data_frame_solver['Carers']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(travel_time_per_carer, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Travel Time per Carer [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(30), to_matplotlib_minutes(60),
to_matplotlib_minutes(90), to_matplotlib_minutes(120)])
fig.tight_layout()
rows.plot.save_figure('quality_boxplot_travel_time')
span_per_client = [list((data_frame_planners['span'] / data_frame_planners['Clients']).values),
list((data_frame_solver['span'] / data_frame_solver['Clients']).values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(span_per_client, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Visit Span per Client [HH:MM]')
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(format_timedelta_pandas))
ax.set_yticks([0, to_matplotlib_minutes(6 * 60), to_matplotlib_minutes(7 * 60), to_matplotlib_minutes(8 * 60),
to_matplotlib_minutes(9 * 60)])
ax.set_ylim(bottom=6 * 60 * 60 * 1000000000)
fig.tight_layout()
rows.plot.save_figure('quality_span')
teams = [list(data_frame_planners['Teams'].values), list(data_frame_solver['Teams'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(teams, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Teams of 2 Carers')
fig.tight_layout()
rows.plot.save_figure('quality_teams')
better_matching = [list(data_frame_planners['Better Matching'].values),
list(data_frame_solver['Better Matching'].values)]
fig, ax = matplotlib.pyplot.subplots(1, 1, figsize=figsize)
ax.boxplot(better_matching, flierprops=dict(marker='.'), medianprops=dict(color=FOREGROUND_COLOR))
ax.set_xticklabels(labels, rotation=45)
ax.set_ylabel('Better Client-Carer Matching')
fig.tight_layout()
rows.plot.save_figure('quality_matching')
def parse_percent(value):
value_to_use = value.replace('%', '')
return float(value_to_use) / 100.0
def parse_duration_seconds(value):
return datetime.timedelta(seconds=value)
def compare_benchmark(args, settings):
data_file_path = getattr(args, __FILE_ARG)
data_frame = | pandas.read_csv(data_file_path) | pandas.read_csv |
import unittest
import os
from collections import defaultdict
from unittest import mock
import warnings
import pandas as pd
import numpy as np
from dataprofiler.profilers import FloatColumn
from dataprofiler.profilers.profiler_options import FloatOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestFloatColumn(unittest.TestCase):
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertEqual(profiler.mean, 0)
self.assertTrue(profiler.median is np.nan)
self.assertEqual([np.nan], profiler.mode)
self.assertTrue(profiler.variance is np.nan)
self.assertTrue(profiler.skewness is np.nan)
self.assertTrue(profiler.kurtosis is np.nan)
self.assertTrue(profiler.stddev is np.nan)
self.assertIsNone(profiler.histogram_selection)
self.assertEqual(len(profiler.quantiles), 999)
self.assertIsNone(profiler.data_type_ratio)
def test_single_data_variance_case(self):
data = pd.Series([1.5]).apply(str)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 1.0)
self.assertEqual(profiler.mean, 1.5)
self.assertTrue(profiler.variance is np.nan)
data = pd.Series([2.5]).apply(str)
profiler.update(data)
self.assertEqual(profiler.match_count, 2)
self.assertEqual(profiler.mean, 2.0)
self.assertEqual(profiler.variance, 0.5)
def test_profiled_precision(self):
"""
Checks whether the precision for the profiler is correct.
:return:
"""
df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)
df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)
df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)
df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)
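        # Precision counts the significant digits of the raw string value, e.g.
        # '2.13143' contributes 6 and '3.' contributes 1, which is why df_mix is
        # expected to span a min of 1 and a max of 6 below.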
float_profiler = FloatColumn("Name")
float_profiler.update(df_3)
self.assertEqual(4, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_2)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_1)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_mix)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(6, float_profiler.precision['max'])
# edge cases #
# integer with 0s on right and left side
df_ints = pd.Series(['0013245678', '123456700', '0012345600'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_ints)
self.assertEqual(6, float_profiler.precision['min'])
self.assertEqual(8, float_profiler.precision['max'])
# scientific
df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_scientific)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# plus
df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_plus)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# minus
df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_minus)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# spaces around values
df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])
float_profiler = FloatColumn("Name")
float_profiler.update(df_spaces)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# constant precision
df_constant = pd.Series(['1.34', '+1.23e-4', '00101',
'+100.', '0.234', '-432', '.954',
'+.342', '-123e1', '23.1'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_constant)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(3, float_profiler.precision['max'])
self.assertEqual(3, float_profiler.precision['mean'])
self.assertEqual(10, float_profiler.precision['sample_size'])
self.assertEqual(0, float_profiler.precision['var'])
self.assertEqual(0, float_profiler.precision['std'])
# random precision
df_random = pd.Series(['+ 9', '-.3', '-1e-3', '3.2343', '0',
'1230', '0.33', '4.3', '302.1', '-4.322'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_random)
self.assertEqual(0, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
self.assertEqual(2.4444, float_profiler.precision['mean'])
self.assertEqual(9, float_profiler.precision['sample_size'])
self.assertEqual(2.7778, float_profiler.precision['var'])
self.assertEqual(1.6667, float_profiler.precision['std'])
# Ensure order doesn't change anything
df_random_order = pd.Series(['1230', '0.33', '4.3', '302.1', '-4.322',
'+ 9', '-.3', '-1e-3', '3.2343', '0'])
float_profiler_order = FloatColumn("Name")
        float_profiler_order.update(df_random_order)
self.assertDictEqual(
float_profiler.precision, float_profiler_order.precision
)
# check to make sure all formats of precision are correctly predicted
samples = [
# value, min expected precision
['10.01', 4],
['.01', 1],
['0.01', 1],
['-0.01', 1],
['+0.01', 1],
[' +0.013', 2],
[' -1.3234e-3 ', 5],
[' 0012345600 ', 6],
[' 0012345600. ', 8],
[' -0012345600. ', 8],
]
for sample in samples:
df_series = pd.Series([sample[0]])
min_expected_precision = sample[1]
precision = FloatColumn._get_float_precision(df_series)
self.assertEqual(min_expected_precision, precision['min'],
msg='Errored for: {}'.format(sample[0]))
def test_profiled_min(self):
# test with multiple values
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[1:])
self.assertEqual(profiler.min, -4)
profiler.update(df)
self.assertEqual(profiler.min, -5)
profiler.update(pd.Series(['-4']))
self.assertEqual(profiler.min, -5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.min, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 0.0)
def test_profiled_max(self):
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[:-1])
self.assertEqual(profiler.max, 4)
profiler.update(df)
self.assertEqual(profiler.max, 5)
profiler.update(pd.Series(['4']))
self.assertEqual(profiler.max, 5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.max, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 3.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 0.0)
def test_profiled_mode(self):
# disabled mode
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.mode.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertListEqual([np.nan], profiler.mode)
# same values
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertListEqual([1], profiler.mode)
# multiple modes
df = pd.Series([1.5, 1.5, 2.5, 2.5, 3.5, 3.5, 4.1, 4.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.5, 2.5, 3.5, 4.1], profiler.mode,
decimal=2)
# with different values
df = pd.Series([1.25, 1.25, 1.25, 1.25, 2.9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.25], profiler.mode, decimal=2)
# with negative values
df = pd.Series([-1.1, 1.9, 1.9, 1.9, 2.1, 2.01, 2.01, 2.01]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.9, 2.01], profiler.mode,
decimal=2)
# all unique values
df = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
# By default, returns 5 of the possible modes
np.testing.assert_array_almost_equal([1, 2, 3, 4, 5],
profiler.mode, decimal=2)
# Edge case where mode appears later in the dataset
df = pd.Series([1, 2, 3, 4, 5, 6.2, 6.2]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([6.2], profiler.mode, decimal=2)
df = pd.Series([2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7.1, 7.1, 7.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([7.1], profiler.mode, decimal=2)
def test_top_k_modes(self):
# Default options
options = FloatOptions()
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(5, len(profiler.mode))
# Test if top_k_modes is less than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 2
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(2, len(profiler.mode))
# Test if top_k_mode is greater than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 8
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
# Only 5 possible modes so return 5
self.assertEqual(5, len(profiler.mode))
def test_profiled_median(self):
# disabled median
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.median.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertTrue(profiler.median is np.nan)
# same values
df = | pd.Series([1, 1, 1, 1, 1, 1, 1]) | pandas.Series |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Mapping,
NamedTuple,
Optional,
Set,
TextIO,
Tuple,
Union,
)
import numpy as np
import pandas as pd # type: ignore
from pandas.core.dtypes.common import ( # type: ignore
is_bool_dtype,
is_datetime_or_timedelta_dtype,
is_float_dtype,
is_integer_dtype,
is_string_dtype,
)
from pandas.core.dtypes.inference import is_list_like
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from numpy.typing import DTypeLike
ES_FLOAT_TYPES: Set[str] = {"double", "float", "half_float", "scaled_float"}
ES_INTEGER_TYPES: Set[str] = {"long", "integer", "short", "byte"}
ES_COMPATIBLE_TYPES: Dict[str, Set[str]] = {
"double": ES_FLOAT_TYPES,
"scaled_float": ES_FLOAT_TYPES,
"float": ES_FLOAT_TYPES,
"half_float": ES_FLOAT_TYPES,
"long": ES_INTEGER_TYPES,
"integer": ES_INTEGER_TYPES,
"short": ES_INTEGER_TYPES,
"byte": ES_INTEGER_TYPES,
"date": {"date_nanos"},
"date_nanos": {"date"},
"keyword": {"text"},
}
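# Example lookup: a "float" field is listed as compatible with every other
# Elasticsearch floating-point type, so `"double" in ES_COMPATIBLE_TYPES["float"]`
# evaluates to True, while e.g. "long" does not appear in that set.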
class Field(NamedTuple):
"""Holds all information on a particular field in the mapping"""
column: str
es_field_name: str
is_source: bool
es_dtype: str
es_date_format: Optional[str]
pd_dtype: type
is_searchable: bool
is_aggregatable: bool
is_scripted: bool
aggregatable_es_field_name: str
@property
def is_numeric(self) -> bool:
return is_integer_dtype(self.pd_dtype) or is_float_dtype(self.pd_dtype)
@property
def is_timestamp(self) -> bool:
return | is_datetime_or_timedelta_dtype(self.pd_dtype) | pandas.core.dtypes.common.is_datetime_or_timedelta_dtype |
from __future__ import division
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing,svm
from sklearn.model_selection import GridSearchCV,cross_validate
from sklearn.naive_bayes import MultinomialNB
from wordcloud import WordCloud
from tempfile import mkdtemp
from shutil import rmtree
from KNN import KNN
from textblob import TextBlob
import scipy.sparse as sp
import warnings
import pandas as pd
import numpy as np
def textblob_tokenizer(str_input):
blob=TextBlob(str_input.lower())
tokens=blob.words
words=[token.stem() for token in tokens]
return words
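# The custom tokenizer lower-cases, tokenizes and stems each word with TextBlob,
# so inflected forms of a word map to a single vocabulary entry before
# CountVectorizer builds its features.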
def cleanPrep(data):
prep_list=['that','as','about','above','across','after','against','along','among','around','at','before','behind','below','beneath','beside','between','by','down','during','except','for','from','in','in front of','inside','instead of','into','like','near','of','off','on','onto','on top of','out of','outside','over','past','since','through','to','toward','under','underneath','until','up','upon','with','within','without','according to','because of','by way of','in addition to','front of','in place of','regard to','in spite of','instead of','on account of','out of']
for index,row in data.iteritems():
dataWords=row.split()
Words=[word for word in dataWords if word.lower() not in prep_list]
result=' '.join(Words)
        data = data.replace(to_replace=row, value=result)
return data
def grid(X,y,pipe):
Cs=[0.001,0.01,0.1,1,10,100,1000]
gammas=[0.001,0.01,0.1,1,10,100,1000]
kernels=['linear','rbf']
nfolds=None
param_grid={'C':Cs,'gamma':gammas,'kernel':kernels}
grid=GridSearchCV(pipe,param_grid,scoring='accuracy',cv=nfolds,n_jobs=-1)
grid.fit(X,y)
return grid.best_params_
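# grid() runs an exhaustive search over the C/gamma/kernel combinations with
# accuracy scoring (cv=None falls back to scikit-learn's default splitting) and
# returns only the best-scoring parameter set.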
def wordcloud(train_data):
wordcloud_list={}
for cat in np.unique(train_data["Category"]):
lst=train_data.loc[(train_data["Category"]==cat),["Content"]]
s=[]
for val in lst.values:
s.append(val)
g=''.join(str(x).encode('utf-8') for x in s)
g.encode('utf-8')
stop=set(ENGLISH_STOP_WORDS)
stop.add("now")
stop.add("said")
stop.add("like")
stop.add("u2013")
stop.add("u201")
stop.add("u201d")
stop.add("u2019")
stop.add("u2019s")
wordcloud=WordCloud(stopwords=stop).generate(g)
wordcloud_list[cat]=wordcloud
image=wordcloud.to_image()
image.show()
return wordcloud_list
if __name__=='__main__':
    # Silence warnings emitted by the scikit-learn library
warnings.filterwarnings(module='sklearn*',action='ignore')
#Here starts the main code
train_data = pd.read_csv('train_set.csv', sep="\t",index_col=False,encoding='utf-8')
test_data = pd.read_csv('test_set.csv', sep="\t",index_col=False,encoding='utf-8')
le = preprocessing.LabelEncoder()
le.fit(train_data["Category"])
y = le.transform(train_data["Category"])
count_vectorizer= CountVectorizer(stop_words=ENGLISH_STOP_WORDS,min_df=0.02,max_df=0.7,analyzer='word',tokenizer=textblob_tokenizer)
#This is for title
X = count_vectorizer.fit_transform(train_data['Content'])
Test= count_vectorizer.transform(test_data['Content'])
X_Title = count_vectorizer.fit_transform(train_data['Title'])
Test_Title=count_vectorizer.transform(test_data['Title'])
#count_vectorizer= count_vectorizer.fit(train_data['Content'])
#X=count_vectorizer.transform(train_data['Content'])
#Test= count_vectorizer.transform(test_data['Content'])
#X_Title=count_vectorizer.transform(train_data['Title'])
#Test_Title=count_vectorizer.transform(test_data['Title'])
X1=sp.hstack((X,X_Title))
Test1=sp.hstack((Test,Test_Title))
#Here is the transformer and the classifiers
cachedir=mkdtemp()
svd=TruncatedSVD(n_components=100)
Rf=RandomForestClassifier()
MyMethod=svm.SVC(C=0.001,gamma=0.001,kernel='linear')
Svm=svm.SVC()
Mult_NB=MultinomialNB()
k=int(raw_input("Give me k for neighbors:"))
Knn=KNN(n_neighbors=k)
scoring={'acc':'accuracy','prec_macro':'precision_macro','rec_macro':'recall_macro','f1_mac':'f1_macro'}
mv={'Random_Forest':[],'Naive_Bayes':[],'KNN':[],'SVM':[],'My Method':[]}
clf_list={'Random_Forest':Rf,'SVM':Svm,'My Method':MyMethod,'KNN':Knn}
for (nm,clf) in clf_list.iteritems():
estimators=[('svd',svd),('clf',clf)]
pipe=Pipeline(steps=estimators)
pipe.fit(X1,y)
if nm=='My Method':
y_pred = pipe.predict(Test1)
predicted_categories = le.inverse_transform(y_pred)
scores=cross_validate(pipe,X1,y,scoring=scoring,cv=10,n_jobs=-1,return_train_score=False)
mv[nm].append(scores['test_acc'].mean())
mv[nm].append(scores['test_prec_macro'].mean())
mv[nm].append(scores['test_rec_macro'].mean())
mv[nm].append(scores['test_f1_mac'].mean())
Mult_NB.fit(X1,y)
scores=cross_validate(Mult_NB,X1,y,scoring=scoring,cv=10,return_train_score=False)
mv['Naive_Bayes'].append(scores['test_acc'].mean())
mv['Naive_Bayes'].append(scores['test_prec_macro'].mean())
mv['Naive_Bayes'].append(scores['test_rec_macro'].mean())
mv['Naive_Bayes'].append(scores['test_f1_mac'].mean())
id_data=test_data['Id']
dictio={'ID':[],'Predicted_Category':[]}
for i in range(len(predicted_categories)):
dictio['ID'].append(id_data[i])
dictio['Predicted_Category'].append(predicted_categories[i])
out=pd.DataFrame(data=dictio)
df= | pd.DataFrame(data=mv,index=['Accuracy','Precision','Recall','F-Measure']) | pandas.DataFrame |
#-*-coding=utf-8-*-
from emotion import emo
from collections import OrderedDict
from eval import getCNNDaata
import pandas as pd
from DBHandler import getStockList
from TuHandler import TuHandler
from datetime import date
class dataFetcher:
codeList = []
def __init__(self, listName):
self.codeList = getStockList(listName)
def get_emotion(self, source , start, end):
try:
return pd.read_csv('emo.csv')
except:
pass
start_str = str(start)
end_str = str(end)
posData, negData = getCNNDaata(True, "1472610048" ,source, start_str , end_str)
em = emo("mergedResult")
for stock in posData:
for date in posData[stock]:
name = "%s %s"%(stock, date)
emoV = em.getEmo(posData[stock][date], name)
if emoV['wnum']:
expo = emoV['totalV']/emoV['wnum']
posData[stock][date] = [posData[stock][date], expo]
else:
posData[stock][date] = [posData[stock][date], 5]
em.clear()
for stock in negData:
for date in negData[stock]:
name = "%s %s"%(stock, date)
emoV = em.getEmo(negData[stock][date], name)
if emoV['wnum']:
expo = emoV['totalV']/emoV['wnum']
negData[stock][date] = [negData[stock][date], expo]
else:
negData[stock][date] = [negData[stock][date], 5]
FinalData = OrderedDict()
for stock in negData:
for date in negData[stock]:
FinalData.setdefault(stock, OrderedDict()).setdefault(date, -1)
expo = 0.0
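            # Combined exposure weights the positive-article score (0.6) above the
            # negative-article score (0.4); when only one side has data for this
            # stock/date the KeyError fallback uses that side alone.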
try:
expo = 0.6*posData[stock][date][1] + 0.4*negData[stock][date][1]
except KeyError:
expo = negData[stock][date][1]
FinalData[stock][date] = expo
for stock in posData:
for date in posData[stock]:
FinalData.setdefault(stock, OrderedDict()).setdefault(date, -1)
expo = FinalData[stock][date]
if FinalData[stock][date] == -1:
try:
expo = 0.6*posData[stock][date][1] + 0.4*negData[stock][date][1]
except KeyError:
expo = posData[stock][date][1]
FinalData[stock][date] = expo
list = []
for stock in FinalData:
# FinalData[stock].setdefault('code', stock)
for date in FinalData[stock]:
if date=='code': continue
d = pd.DataFrame({'emo':FinalData[stock][date], 'code' : stock, 'date' : date}, index=[0])
list.append(d)
emo_df = pd.concat(list, axis=0)
print(emo_df)
return emo_df
def get_train_data(self, source = "", start = date.today(), end = date.today()):
pre_list = []
turnover_list = []
tu = TuHandler(self.codeList, start, end)
self.codeList = tu.stockList
emo_df = pd.read_csv('emo.csv')
SMB_df = | pd.read_csv('SMB.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
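# Usage sketch (hypothetical call, not part of the original module):
#   assert_stat_op_calc('mean', np.mean, DataFrame(np.random.randn(10, 3)))
# checks that DataFrame.mean matches np.mean column- and row-wise, including the
# skipna and all-NaN corner cases exercised above.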
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns containing NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
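# Usage sketch (hypothetical call): the boolean helpers are driven the same way,
# e.g. assert_bool_op_calc('any', np.any, DataFrame(np.random.randn(5, 3) > 0))
# compares DataFrame.any against np.any on both axes.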
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = | DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O') | pandas.DataFrame |
import pandas as pd
#import openpyxl
from openpyxl import workbook
from openpyxl import load_workbook
import numpy as np
from scipy.stats import spearmanr
from .general_functions import *
class Abundances():
def __init__(self):
self.abundance_df = pd.DataFrame(index=[], columns=[])
self.corr_matrix = None
self.corr_signature = None
self.sample_names = []
self.header_present = False
self.abundance_raw_df = None
def addMasking(self):
""" merges abundance dataframe and taxonomy dataframe """
self.abundance_df['masked'] = [False]*len(self.abundance_df.index)
self.abundance_df['colour'] = ['undefined']*len(self.abundance_df.index)
def addSample(self, sample_name, filename):
""" adds a sample (as one column) to the dataframes for relative and raw counts"""
tax_levels = None
if len(self.abundance_df.columns) == 0:
self.abundance_df = pd.read_csv(filename, header=0, sep='\t') #krona (no header, no index)
cols = list(self.abundance_df.columns)
self.abundance_df = self.abundance_df[cols[0:2] + cols[:1:-1]]
self.tax_levels = self.abundance_df.columns.tolist()[2:]
self.abundance_df = self.abundance_df[self.abundance_df.columns.tolist()[0:2] + self.tax_levels]
self.abundance_df.rename(columns={self.abundance_df.columns[0]:sample_name}, inplace=True)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_raw_df = self.abundance_df.loc[:,[self.abundance_df.columns[1]] + self.tax_levels]
self.abundance_raw_df.rename(columns={self.abundance_raw_df.columns[0]:sample_name}, inplace=True)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_df = self.abundance_df.loc[:,[self.abundance_df.columns[0]] + self.tax_levels]
else:
sample_df = pd.read_csv(filename, header=0, sep='\t')
sample_raw_df = sample_df.loc[:,[sample_df.columns[1]]+self.tax_levels]
sample_raw_df.rename(columns={sample_raw_df.columns[0]:sample_name}, inplace=True)
sample_raw_df.index = sample_raw_df[self.tax_levels[0]]+'_'
sample_raw_df.index.name = None
sample_df.rename(columns={sample_df.columns[0]:sample_name}, inplace=True)
sample_df.index = sample_df[self.tax_levels[0]]+'_'
sample_df.index.name = None
self.abundance_df = pd.merge(self.abundance_df, sample_df, how='outer', on=self.tax_levels)
self.abundance_df.index = self.abundance_df[self.tax_levels[0]]+'_'
self.abundance_df.index.name = None
self.abundance_df.fillna(value=0, inplace=True)
self.abundance_raw_df = pd.merge(self.abundance_raw_df, sample_raw_df, how='outer', on=self.tax_levels)
self.abundance_raw_df.index = self.abundance_raw_df[self.tax_levels[0]]+'_'
self.abundance_raw_df.index.name = None
self.abundance_raw_df.fillna(value=0, inplace=True)
self.abundance_df[sample_name] = self.abundance_df[sample_name].astype(float)
self.abundance_raw_df[sample_name] = self.abundance_raw_df[sample_name].astype(float)
self.sample_names.append(sample_name.strip())
self.abundance_df = self.abundance_df[self.sample_names + self.tax_levels]
self.abundance_raw_df = self.abundance_raw_df[self.sample_names + self.tax_levels]
myindex = list(self.abundance_df.index)
newlist = sorted(set([i for i in myindex if myindex.count(i)>1]))
#problems with the ncbi taxonomy (typos?)
for i in newlist:
self.abundance_df.loc[i,self.sample_names] = self.abundance_df.loc[i].sum(numeric_only=True)
self.abundance_df.drop(i, inplace=True)
self.abundance_raw_df.loc[i,self.sample_names] = self.abundance_raw_df.loc[i].sum(numeric_only=True)
self.abundance_raw_df.drop(i, inplace=True)
return self.tax_levels
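    # Illustrative usage sketch (not part of the original class; the sample
    # names and file paths are made-up assumptions): samples are added one per
    # call and the shared taxonomy levels come back from the first file read.
    #   abundances = Abundances()
    #   tax_levels = abundances.addSample('sample_01', 'sample_01_krona.tsv')
    #   tax_levels = abundances.addSample('sample_02', 'sample_02_krona.tsv')
    #   abundances.addMasking()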
def addRelSample(self, sample_name, filename):
""" adds a sample (as one column) to the dataframes for relative counts """
tax_levels = None
if len(self.abundance_df.columns) == 0:
self.abundance_df = | pd.read_csv(filename, header=0, sep='\t') | pandas.read_csv |
from argparse import ArgumentParser
import pandas as pd
from fyne import heston
from utils import years_to_expiry
def get_heston_greeks(date, bbo, underlying, discount, vols, params):
_, kappa, theta, nu, rho = params
mid = bbo.mean(axis=1).unstack(['Class', 'Expiry', 'Strike'])
mid.name = 'Mid'
properties = mid.columns.to_frame(index=False)
strikes = discount[properties['Expiry']].values*properties['Strike'].values
expiries = years_to_expiry(date, properties['Expiry'])
put = (properties['Class'] == 'P')
deltas = pd.DataFrame(heston.delta(underlying.values[:, None],
strikes, expiries, vols.values[:, None],
kappa, theta, nu, rho, put),
mid.index, mid.columns).unstack('Time')
vegas = pd.DataFrame(heston.vega(underlying.values[:, None],
strikes, expiries, vols.values[:, None],
kappa, theta, nu, rho),
mid.index, mid.columns).unstack('Time')
return pd.concat([deltas, vegas], keys=['Delta', 'Vega'], axis=1)
if __name__ == '__main__':
cli = ArgumentParser()
cli.add_argument('date')
cli.add_argument('bbo_filename')
cli.add_argument('underlying_filename')
cli.add_argument('discount_filename')
cli.add_argument('params_filename')
cli.add_argument('vols_filename')
cli.add_argument('dest_filename')
args = cli.parse_args()
date = pd.to_datetime(args.date)
bbo = pd.read_parquet(args.bbo_filename)
underlying = pd.read_parquet(args.underlying_filename).mean(axis=1)
discount = | pd.read_parquet(args.discount_filename) | pandas.read_parquet |
import lightgbm as lgb
import pandas as pd
import pytest
import shap
from pyspark.ml.classification import RandomForestClassifier
from pyspark.sql import SparkSession
from sklearn.datasets import make_classification
from shapicant import PandasSelector, SparkSelector, SparkUdfSelector
@pytest.fixture
def data():
return make_classification(
n_samples=1000,
n_features=25,
n_informative=3,
n_redundant=2,
n_repeated=0,
n_classes=3,
n_clusters_per_class=1,
shuffle=False,
random_state=42,
)
def test_pandas_selector(data):
X = pd.DataFrame(data[0])
y = data[1]
model = lgb.LGBMClassifier(
boosting_type="rf", subsample_freq=1, subsample=0.632, n_estimators=100, n_jobs=-1, random_state=42
)
explainer_type = shap.TreeExplainer
selector = PandasSelector(model, explainer_type, n_iter=50, random_state=42)
selector.fit(X, y)
X_selected = selector.transform(X, alpha=0.05)
assert selector.p_values_.between(0, 1).all()
assert X_selected.columns.tolist() == [0, 1, 2, 3, 4]
def test_spark_selector(data):
spark = SparkSession.builder.config("spark.sql.shuffle.partitions", "10").getOrCreate()
sdf = spark.createDataFrame(pd.DataFrame(data[0]).assign(label=data[1]))
model = RandomForestClassifier(featureSubsetStrategy="all", numTrees=20, seed=42)
explainer_type = shap.TreeExplainer
selector = SparkSelector(model, explainer_type, n_iter=10, random_state=42)
selector.fit(sdf, label_col="label")
sdf_selected = selector.transform(sdf, label_col="label", alpha=0.10)
assert selector.p_values_.between(0, 1).all()
assert sdf_selected.columns == ["0", "1", "2", "3", "4", "label"]
def test_spark_udf_selector(data):
spark = SparkSession.builder.config("spark.sql.shuffle.partitions", "10").getOrCreate()
sdf = spark.createDataFrame( | pd.DataFrame(data[0]) | pandas.DataFrame |
import os
# disable tensorflow debugging information
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
# suppress warnings:
warnings.filterwarnings("ignore")
from deep_utils import tf_set_seed
from utils.utils import save_params
from datetime import datetime
import tensorflow as tf
import numpy as np
from data.load_data import load_data
from models import load_model
from utils.callbacks import get_callbacks
from sklearn.metrics import classification_report, confusion_matrix
from argparse import ArgumentParser
from deep_utils import remove_create
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
parser = ArgumentParser()
parser.add_argument('--seed', default=1234, type=int, help="Set random seed for reproducibility")
parser.add_argument('--model-name', default='FFTCustom', help="Choose the model to train. The default is FFTCustom")
parser.add_argument('--data-path', default='./data/DATA.mat', help="Path to the Data. The default is ./data/DATA.mat")
parser.add_argument('--epochs', default=2, type=int, help="Number of training epochs, default is set to 2")
parser.add_argument('--batch-size', default=4, type=int, help="batch size, default is set to 4")
parser.add_argument('--checkpoints', default="./checkpoints", type=str,
help="Path to checkpoints, default is ./checkpoints")
parser.add_argument("--early-stopping", default=100, type=int,
help="early stopping patience epoch number, default is 15")
parser.add_argument("--reduce-lr", default=50, type=int, help="reduce lr patience, default is 10")
parser.add_argument("--dir-name", default='', type=str,
help="directory name of outputs, default is ''. If provided will overwrite existing files")
args = parser.parse_args()
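# Illustrative invocation (the flag names come from the parser above; the script
# name and argument values are example assumptions, not from the original source):
#   python train.py --model-name FFTCustom --data-path ./data/DATA.mat \
#       --epochs 200 --batch-size 4 --dir-name run1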
# set seed for reproducibility
tf_set_seed(args.seed)
def main():
# load model
model = load_model(model_name=args.model_name)
print(f"[INFO] Model:{args.model_name} is loaded ...")
model.summary()
# load data
(x_train, y_train), (x_test, y_test) = load_data(model_name=args.model_name,
data_path=args.data_path,
seed=args.seed)
# train the model
print(f"[INFO] Started the training for model: {args.model_name} ...")
if args.dir_name:
dir_ = args.checkpoints + '/' + args.model_name + '/' + args.dir_name
if os.path.exists(dir_):
print(f"[INFO] {dir_} exists, removing it ...")
remove_create(dir_)
else:
os.makedirs(dir_, exist_ok=False)
else:
dir_ = args.checkpoints + '/' + args.model_name + "/" + '_{}'.format(
str(datetime.now()).replace(':', '_').replace(' ', '_'))
os.makedirs(dir_, exist_ok=False)
# save params
save_params(dir_ + "/params.txt", args)
callbacks = get_callbacks(dir_,
early_stopping_p=args.early_stopping,
reduce_lr_patience=args.reduce_lr)
print(f"[INFO] Training with the following arguments {args}")
model.fit(x_train, y_train,
epochs=args.epochs,
batch_size=args.batch_size,
verbose=1,
validation_data=(x_test, y_test),
callbacks=callbacks,
shuffle=False)
print("[INFO] confusion matrix:!")
print("[INFO] Loading best model:")
model = tf.keras.models.load_model(dir_ + '/model_best')
y_pred = np.around(model.predict(x_test))
rep = classification_report(y_test, y_pred)
with open(dir_ + "/classification_report.txt", mode='w') as f:
f.write(rep)
print(rep)
print("[INFO] Computing Confusion matrix")
conf_matrix = confusion_matrix(y_test, y_pred)
df_cm = | pd.DataFrame(conf_matrix, index=["healthy", "schizophrenia"], columns=["healthy", "schizophrenia"]) | pandas.DataFrame |
import warnings
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
class TimeSynchronousDownscaler(BaseEstimator):
def _check_X_y(self, X, y, **kwargs):
        if isinstance(X, pd.DataFrame) and isinstance(y, pd.DataFrame):
assert X.index.equals(y.index)
check_X_y(X, y) # this may be inefficient
else:
X, y = check_X_y(X, y)
warnings.warn('X and y do not have pandas DateTimeIndexes, making one up...')
index = pd.date_range(periods=len(X), start='1950', freq='MS')
X = pd.DataFrame(X, index=index)
y = | pd.DataFrame(y, index=index) | pandas.DataFrame |
#!/usr/bin/env python
''' ---------------- About the script ----------------
Assignment 3: Sentiment Analysis
This script calculates sentiment scores of over a million headlines taken from the Australian news source ABC (Start Date: 2003-02-19 ; End Date: 2020-12-31) using the spaCyTextBlob approach, creates and saves two plots of sentiment over time with a 1-week and a 1-month rolling averages. Also, it creates one plot with 1-day, 1-week, 1-month and 1-year rolling averages together for a better comparison.
Example:
$ python sentiment.py
'''
"""---------------- Importing libraries ----------------
"""
# importing libraries
import spacy
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(os.path.join(".."))
from spacytextblob.spacytextblob import SpacyTextBlob
# initialising spacy
nlp = spacy.load("en_core_web_sm")
# initialising spaCyTextBlob and adding it as a new component to spaCy nlp pipeline.
spacy_text_blob = SpacyTextBlob()
nlp.add_pipe(spacy_text_blob)
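# Minimal sketch of reading a score back from this pipeline. Hedged assumption:
# the extension attribute below is the one exposed by the spaCy 2.x-era
# spacytextblob component used here; newer releases expose different names.
#   doc = nlp("a very good day for the reef")
#   polarity = doc._.sentiment.polarity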
"""---------------- Main script ----------------
"""
def main():
"""------ Reading data and preparation ------
"""
# Defining path to the csv file
in_file = os.path.join("..", "data", "abcnews-date-text", "abcnews-date-text.csv")
# Reading the csv file and saving into a variable
abc_news = | pd.read_csv(in_file) | pandas.read_csv |
'''Report for the entire Project.
Run this report with: `streamlit run 09-1_project-report.py`
This should provide an interactive mechanism to query the recommender system.
'''
import streamlit as st
import pandas as pd
import numpy as np
import sys
sys.path.insert(1, '..')
import recommender as rcmd
from recommender.contrib import fmp_api as fmp
import sklearn_recommender as skr
import tensorflow as tf
from sklearn.metrics.pairwise import cosine_similarity
# -- Setup --
st.title("Stock Recommender System")
@st.cache
def load_data():
'''Load all data and setup the system.'''
# retrieve all relevant symbols
stocks = fmp.profile.list_symbols()
cache = rcmd.stocks.Cache()
# load the relevant profile informations
df_profile = cache.load_profile_data()
# generate glove embeddings
skr.glove.download('twitter')
gt = skr.glove.GloVeTransformer('twitter', 25, 'sent', tokenizer=skr.nlp.tokenize_clean)
embs = gt.transform(df_profile['description'].fillna(""))
df_embs = pd.concat([df_profile[['symbol']], | pd.DataFrame(embs) | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
from mpl_toolkits.mplot3d import Axes3D
plt.rc('font', family='serif')
class Plots():
def boxcar(data):
f, (ax) = plt.subplots(1, 1, figsize=(12, 4))
f.suptitle('Boxcar', fontsize=14)
sns.boxplot(x=data.columns[1], y=data.columns[0], data=data, ax=ax)
def Pairplot(X,y):
pp = sns.pairplot(X, size=1.5, aspect=1.5,
plot_kws=dict(edgecolor="k", linewidth=0.5),
diag_kind="kde", diag_kws=dict(shade=True),hue=y)
plt.show()
    def Corrmatrix(X):  # no self argument, consistent with the other namespace-style helpers in this class
f, ax = plt.subplots(figsize=(10, 6))
corr = X.corr()
hm = sns.heatmap(round(corr,2), annot=True, ax=ax, cmap="coolwarm",fmt='.2f',
linewidths=.05)
f.subplots_adjust(top=0.93)
t= f.suptitle('Log Attributes Correlation Heatmap', fontsize=14)
plt.show()
    def Frequencies(X):
        fig = plt.figure(figsize=(6, 4))  # the figure must exist before a title or axes can be added
        title = fig.suptitle("Frequencies", fontsize=14)
fig.subplots_adjust(top=0.85, wspace=0.3)
ax = fig.add_subplot(1,1, 1)
ax.set_xlabel("")
ax.set_ylabel("Frequency")
ax.tick_params(axis='both', which='major', labelsize=8.5)
sns.kdeplot(X, color='steelblue', ax=ax,shade=True)
plt.show()
def Barplot(X):
fig = plt.figure(figsize = (6, 4))
fig.suptitle("", fontsize=14)
sns.barplot(data=X,palette=sns.xkcd_palette(['windows blue']))
def confusion(y,yp,n_classes):
confusion_1 = confusion_matrix(y, yp)
        confusion_1 = confusion_1.astype(float)  # convert the integer counts to float (assigning to .dtype would reinterpret the raw bytes)
for i in range(0,n_classes,1):
confusion_1[i,:]=confusion_1[i,:]/sum(confusion_1[i,:])
confusion_1= | pd.DataFrame(confusion_1,columns=['Shale','Brine Sands','Gas Sands'], index=['Shale',' Brine Sands','Gas Sands'] ) | pandas.DataFrame |
"""
Tests for character matrix formation.
"""
import unittest
import numpy as np
import pandas as pd
import cassiopeia as cas
class TestCharacterMatrixFormation(unittest.TestCase):
def setUp(self):
at_dict = {
"cellBC": ["cellA", "cellA", "cellA", "cellB", "cellC"],
"intBC": ["A", "B", "C", "A", "C"],
"r1": ["None", "ATC", "GGG", "None", "GAA"],
"r2": ["None", "AAA", "GAA", "None", "GAA"],
"r3": ["ATC", "TTT", "ATA", "ATA", "ATA"],
"UMI": [5, 10, 1, 30, 30],
}
self.alleletable_basic = pd.DataFrame.from_dict(at_dict)
self.mutation_priors = pd.DataFrame.from_dict(
{
"ATC": 0.5,
"GGG": 0.2,
"GAA": 0.1,
"AAA": 0.05,
"TTT": 0.05,
"ATA": 0.1,
},
orient="index",
columns=["freq"],
)
## setup complicated allele table
at_dict = {
"cellBC": [
"cellA",
"cellA",
"cellA",
"cellB",
"cellB",
"cellC",
"cellD",
"cellD",
],
"intBC": ["A", "B", "C", "B", "C", "A", "A", "B"],
"r1": ["AAA", "AAB", "AAC", "AAD", "ABA", "ABB", "AAA", "AAB"],
"r2": ["BAA", "BAB", "BAC", "BAD", "BBA", "BBB", "BAA", "BAB"],
"r3": ["CAA", "CAB", "CAC", "CAD", "CBA", "CBB", "CAA", "CAB"],
"UMI": [5, 10, 30, 30, 10, 10, 3, 3],
"Mouse": ["M1", "M1", "M1", "M1", "M1", "M1", "M2", "M2"],
}
self.allele_table_mouse = pd.DataFrame(at_dict)
## set up non-cassiopeia allele table
self.noncassiopeia_alleletable = self.alleletable_basic.copy()
self.noncassiopeia_alleletable.rename(
columns={"r1": "cs1", "r2": "cs2", "r3": "cs3"}, inplace=True
)
# allele table with conflicts
at_dict = {
"cellBC": ["cellA", "cellA", "cellA", "cellB", "cellC", "cellA"],
"intBC": ["A", "B", "C", "A", "C", "A"],
"r1": ["None", "ATC", "GGG", "None", "GAA", "None"],
"r2": ["None", "AAA", "GAA", "None", "GAA", "ACT"],
"r3": ["ATC", "TTT", "ATA", "ATA", "ATA", "None"],
"UMI": [5, 10, 1, 30, 30, 5],
}
self.alleletable_conflict = pd.DataFrame.from_dict(at_dict)
def test_basic_character_matrix_formation(self):
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_basic
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 9)
expected_df = pd.DataFrame.from_dict(
{
"cellA": [0, 0, 1, 1, 1, 1, 1, 1, 1],
"cellB": [0, 0, 2, -1, -1, -1, -1, -1, -1],
"cellC": [-1, -1, -1, -1, -1, -1, 2, 1, 1],
},
orient="index",
columns=[f"r{i}" for i in range(1, 10)],
)
pd.testing.assert_frame_equal(character_matrix, expected_df)
def test_character_matrix_formation_custom_missing_data(self):
self.alleletable_basic.loc[0, "r1"] = "missing"
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_basic,
missing_data_allele="missing",
missing_data_state=-3,
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 9)
expected_df = pd.DataFrame.from_dict(
{
"cellA": [-3, 0, 1, 1, 1, 1, 1, 1, 1],
"cellB": [0, 0, 2, -3, -3, -3, -3, -3, -3],
"cellC": [-3, -3, -3, -3, -3, -3, 2, 1, 1],
},
orient="index",
columns=[f"r{i}" for i in range(1, 10)],
)
pd.testing.assert_frame_equal(character_matrix, expected_df)
def test_character_matrix_formation_with_conflicts(self):
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_conflict
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 9)
expected_df = pd.DataFrame.from_dict(
{
"cellA": [0, (0, 1), (0, 1), 1, 1, 1, 1, 1, 1],
"cellB": [0, 0, 2, -1, -1, -1, -1, -1, -1],
"cellC": [-1, -1, -1, -1, -1, -1, 2, 1, 1],
},
orient="index",
columns=[f"r{i}" for i in range(1, 10)],
)
pd.testing.assert_frame_equal(character_matrix, expected_df)
def test_character_matrix_formation_with_conflicts_no_collapse(self):
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_conflict, collapse_duplicates=False
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 9)
expected_df = pd.DataFrame.from_dict(
{
"cellA": [(0, 0), (0, 1), (1, 0), 1, 1, 1, 1, 1, 1],
"cellB": [0, 0, 2, -1, -1, -1, -1, -1, -1],
"cellC": [-1, -1, -1, -1, -1, -1, 2, 1, 1],
},
orient="index",
columns=[f"r{i}" for i in range(1, 10)],
)
pd.testing.assert_frame_equal(character_matrix, expected_df)
def test_ignore_intbc(self):
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_basic, ignore_intbcs=["B"]
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 6)
expected_df = pd.DataFrame.from_dict(
{
"cellA": [0, 0, 1, 1, 1, 1],
"cellB": [0, 0, 2, -1, -1, -1],
"cellC": [-1, -1, -1, 2, 1, 1],
},
orient="index",
columns=[f"r{i}" for i in range(1, 7)],
)
pd.testing.assert_frame_equal(character_matrix, expected_df)
def test_filter_out_low_diversity_intbcs(self):
(
character_matrix,
priors,
indel_states,
) = cas.pp.convert_alleletable_to_character_matrix(
self.alleletable_basic, allele_rep_thresh=0.99
)
self.assertEqual(character_matrix.shape[0], 3)
self.assertEqual(character_matrix.shape[1], 2)
expected_df = pd.DataFrame.from_dict(
{"cellA": [1, 1], "cellB": [2, -1], "cellC": [-1, 2]},
orient="index",
columns=[f"r{i}" for i in range(1, 3)],
)
| pd.testing.assert_frame_equal(character_matrix, expected_df) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
#
# - import a csv table of score files (and possibly edf files)
# - strip out spaces in column names
# - consolidate into trial datablocks (with consensus)
# TODO: use relative paths in csv?
#======================================
import pdb
import os
import argparse
import pandas as pd
import numpy as np
import scoreblock as sb
raise Exception('deprecated, replaced by scoreloader.py')
#==============================================================================
pp = argparse.ArgumentParser()
pp.add_argument('-c', required=True, default=None, type=str, help='csv table of score files')
pp.add_argument('--dest', type=str, default='ANL-load-scores', help='output folder')
args = pp.parse_args()
os.makedirs(args.dest, exist_ok=True)
# import table of score files
df = pd.read_csv(args.c, index_col=0)
# for the case for a csv with 'humanScores' and 'edf' files stacked together
if 'filetype' in df.columns:
df = df[df['filetype'] == 'humanScores']
# load scores for each trial/day/scorer
ydata = []
for i, row in df.iterrows():
print(row['trial'], row['genotype'], row['day'], row['scorer'])
dfi = pd.read_csv(row['file'])
dfi.columns = [col.replace(" ","") for col in list(dfi.columns)]
ydata.append(dfi['Score'].values)
# combine all score vectors into a single stacked dataframe
ydata = np.asarray(ydata)
index_cols = ['trial', 'genotype', 'day', 'scorer']
data_cols = ['Epoch-%5.5i' % (i+1) for i in range(ydata.shape[1])]
df_data = pd.DataFrame(ydata, columns=data_cols)
df_index = df[index_cols]
df_scores = | pd.concat([df_index, df_data], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
from pathlib import Path
import bw2data as bd
import bw_processing as bwp
from fs.zipfs import ZipFS
from consumption_model_ch.utils import get_habe_filepath
# Local files
from .sensitivity_analysis import get_mask
DATA_DIR = Path(__file__).parent.resolve() / "data"
KONSUMGUETER_DICT = {
"anzahlneuwagen11": "cg_nonewcars",
"anzahlgebrauchtwagen11": "cg_nousedcars",
"anzahlmotorraeder11": "cg_nomotorbikes",
"anzahlfahrraeder11": "cg_nobicycles",
"anzahltiefkuehler11": "cg_nofreezers",
"anzahlgeschirrspueler11": "cg_nodishwashers",
"anzahlwaschmaschinen11": "cg_nowashmachines",
"anzahlwaeschetrockner11": "cg_nodriers",
"anzahlroehrenfernseher11": "cg_nocrttvs",
"anzahllcdfernseher11": "cg_nolcdtvs",
"anzahlparabolantennen11": "cg_nosat",
"anzahlvideokameras11": "cg_nocams",
"anzahlvideorecorder11": "cg_novideorecs",
"anzahlspielkonsolen11": "cg_novieogames",
"anzahldesktopcomputer11": "cg_nodesktoppcs",
"anzahllaptopcomputer11": "cg_nolaptops",
"anzahldrucker11": "cg_noprinters",
"anzahlmobiltelefone11": "cg_nomobilephones",
"anzahlmp3player11": "cg_nomp3players",
"anzahlgpsgeraete11": "cg_nogps",
}
def get_household_data(indices, co_name="swiss consumption 1.0"):
# 1. Get some metadata from the consumption database
co = bd.Database(co_name)
year_habe = co.metadata['year_habe']
dir_habe = co.metadata['dir_habe']
# 2. Extract total demand from HABE
path_ausgaben = get_habe_filepath(dir_habe, year_habe, 'Ausgaben')
path_mengen = get_habe_filepath(dir_habe, year_habe, 'Mengen')
path_konsumgueter = get_habe_filepath(dir_habe, year_habe, 'Konsumgueter')
# change codes to be consistent with consumption database and Andi's codes
ausgaben = | pd.read_csv(path_ausgaben, sep='\t') | pandas.read_csv |
def getMetroStatus():
import http.client, urllib.request, urllib.parse, urllib.error, base64, time
headers = {
# Request headers
'api_key': '6b700f7ea9db408e9745c207da7ca827',}
params = urllib.parse.urlencode({})
try:
conn = http.client.HTTPSConnection('api.wmata.com')
conn.request("GET", "/StationPrediction.svc/json/GetPrediction/All?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
        conn.close()
        return str(data)  # returns the data as a string rather than raw bytes
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def JSONfromMetro(trainString): #converts the string into a dictionary file
import json, re
fixSlash=re.compile(r'\\') #this line and the next remove triple-slashes, which screw up the json module
fixedTrainString=fixSlash.sub('',trainString)
trainJSON=json.loads(fixedTrainString[2:-2]+"}") #slightly adjusts the string to put it in json form
if isinstance(trainJSON,dict) and 'Trains' in trainJSON.keys():
return trainJSON['Trains']
else:
return None
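# For reference, each element of the returned 'Trains' list is a dict carrying at
# least the fields consumed by saveWMATASQL below; the values shown are illustrative:
#   {'Car': '8', 'LocationCode': 'A01', 'Line': 'RD',
#    'DestinationCode': 'A15', 'Min': '3', 'Group': '2'}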
def saveWMATASQL(trainData, engine): #saves the current WMATA data to open engine
import datetime, pandas as pd
#the line below creates a table name starting with WMATA and then containing the date and time information, with each day/hour/minute/second taking two characters
if not isinstance(trainData, list):
return None
DTstring=str(datetime.datetime.now().month)+str(datetime.datetime.now().day).rjust(2,'0')+str(datetime.datetime.now().hour).rjust(2,'0')+str(datetime.datetime.now().minute).rjust(2,'0')+str(datetime.datetime.now().second).rjust(2,'0')
trainFrame=pd.DataFrame('-', index=range(len(trainData)), columns=['DT','Car','Loc','Lin','Des','Min','Gro']) #creates trainFrame, the DataFrame to send to the SQL server
for iter in range(len(trainData)): #for all the trains in trainData
        trainFrame.loc[iter, 'DT'] = DTstring
        for colName in ['Car','LocationCode','Line','DestinationCode','Min','Group']: #select the six relevant fields
            trainFrame.loc[iter, colName[:3]] = trainData[iter][colName] #and fill in the relevant data (a single .loc call avoids chained-assignment issues)
trainFrame.to_sql('WMATAFull', engine, if_exists='append') #send trainFrame to the SQL server
return trainFrame
def lineNextDF(line, destList, arrData):
import pandas as pd
timeString=arrData.DT.iloc[0]
rowName=pd.to_datetime('2016-'+timeString[0]+'-'+timeString[1:3]+' '+timeString[3:5]+':'+timeString[5:7]+':'+timeString[7:])
# names the row as a timestamp with the month day hour minute second
lineStat=pd.DataFrame('-',index=[rowName],columns=line)
for station in line: #repeat the below process for every station on the line
trains2consider=arrData.loc[lambda df: df.Loc==station].loc[lambda df: df.Des.isin(destList)] #pull out the trains at that station heading toward the destinations
if len(trains2consider.index)>0: #If you found a train
if trains2consider.Des.iloc[0] in ['A11','B08','E01','K04']: #the next few lines set the station status to the color and ETA of the first arriving train
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0].lower()+':'+trains2consider.Min.iloc[0] #if the train is terminating early (at Grovesnor, Silver Spring or Mt Vernon), use lowercase
elif trains2consider.Des.iloc[0]=='E06':
lineStat.loc[rowName,station]='Yl:'+trains2consider.Min.iloc[0]
elif trains2consider.Des.iloc[0]=='A13':
lineStat.loc[rowName,station]='Rd:'+trains2consider.Min.iloc[0]
else:
lineStat.loc[rowName,station]=trains2consider.Lin.iloc[0]+':'+trains2consider.Min.iloc[0] #otherwise use upper
return lineStat
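# Each cell of the frame returned above holds the colour code and ETA of the next
# qualifying train joined by a colon, e.g. 'OR:5' or 'RD:BRD'; a lowercase colour
# such as 'rd:2' marks a train terminating early, and '-' means no matching train
# was listed for that station at that moment.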
def allLNtoNE(arrData, surgeNum): #all of the lines to the North and East during Surge 4
import pandas as pd
LNlist=[]
for num in range(len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][num], NEdestList[surgeNum][num], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def allLNtoSW(arrData, surgeNum): #all of the lines to the South and West during Surge 4
import pandas as pd
LNlist=[]
for num in range(1,1+len(lineList[surgeNum])):
LNlist.append(lineNextDF(lineList[surgeNum][-num][::-1], SWdestList[surgeNum][-num][::-1], arrData)) #run for each line and destination
return pd.concat(LNlist, axis=1, join='outer') #then join them all together
def WMATAtableSQL(timeMin,intervalSec, surgeNum): #records for timeMin minutes, polling about every intervalSec seconds
import time, pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@team<EMAIL>:5432/WmataData') #opens the engine to WmataData
#creates a list of the table we're creating to add to the index
isStart=True
startTime=time.time()
while time.time()<(startTime+60*timeMin): #runs for timeMin minutes
stepStart=time.time()
WMATAdf=saveWMATASQL(JSONfromMetro(getMetroStatus()),engine) #save the current train data and appends the name to tableList
if isinstance(WMATAdf,pd.DataFrame) and len(WMATAdf.index)>0: #if you got data back
if isStart: #and it's the first row
allLN2NE=allLNtoNE(WMATAdf,surgeNum) #set allLNtoNE equal to the all LineNext to NE data
allLN2SW=allLNtoSW(WMATAdf,surgeNum) #set allLNtoSW equal to the all LineNext to SW data
isStart=False #and the next row will not be the first row
else: #for other rows
allLN2NE=allLN2NE.append(allLNtoNE(WMATAdf,surgeNum)) #append the data
allLN2SW=allLN2SW.append(allLNtoSW(WMATAdf,surgeNum))
stepTime=time.time()-stepStart #calculates the time this step took
if stepTime<intervalSec: #if intervalSec seconds have not passed,
time.sleep(intervalSec-stepTime) #wait until a total of intervalSec have passed
engine.connect().close()
return [allLN2NE, allLN2SW]
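# Illustrative call (duration and surge number are example values): poll roughly
# every 20 seconds for an hour during surge 4 and keep both direction tables:
#   allLN2NE, allLN2SW = WMATAtableSQL(60, 20, 4)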
def lineNextSQL(line, timeString,destList, engine): #reads the next train to arrive at the stations in line heading toward destList and returns it as a Data Frame
import pandas as pd
from sqlalchemy import create_engine
isEngineNone=(engine is None)
if isEngineNone: #if there's not an engine, make one
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT"='+"'"+timeString+"';"
arrData=pd.read_sql(query,engine)
if isEngineNone:
engine.connect().close()
return lineNextDF(line, destList, arrData)
def lineNextTableSQL(line, firstTime, lastTime, destList): #saves the next train arrivals for a line and destList over time
import time, pandas as pd
from sqlalchemy import create_engine
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!<EMAIL>:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
if len(arrData.index)==0:
return None
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=lineNextDF(line, destList, arrData.loc[lambda df: df.DT==timesPD[0]]) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(lineNextDF(line, destList, arrData.loc[lambda df: df.DT==timesPD[num]])) #add the data for that time
engine.connect().close()
print(time.strftime("%a, %d %b %Y %H:%M:%S"))
return lineStats
def allLNtoNEtable(firstTime, lastTime, surgeNum): #saves the next train arrivals for a line and destList over time
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@<EMAIL>:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
if len(arrData.index)==0: #if you didn't get any data,
return None #return nothing
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=allLNtoNE(arrData.loc[lambda df: df.DT==timesPD[0]],surgeNum) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(allLNtoNE(arrData.loc[lambda df: df.DT==timesPD[num]],surgeNum)) #add the data for that time
engine.connect().close()
return lineStats
def allLNtoSWtable(firstTime, lastTime, surgeNum): #saves the next train arrivals for a line and destList over time
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/WmataData')
query='SELECT * FROM "WMATAFull" WHERE "DT">='+"'"+firstTime+"' AND "+'"DT"<='+"'"+lastTime+"';"
arrData=pd.read_sql(query,engine)
if len(arrData.index)==0: #if you didn't get any data,
return None #return nothing
timesPD=arrData.DT.value_counts().sort_index().index #pull out each time and call it timesPD
lineStats=allLNtoSW(arrData.loc[lambda df: df.DT==timesPD[0]],surgeNum) #save the first status
for num in range(1,len(timesPD)): #for each time
lineStats=lineStats.append(allLNtoSW(arrData.loc[lambda df: df.DT==timesPD[num]],surgeNum)) #add the data for that time
engine.connect().close()
return lineStats
def trainBuild(lineStat,startTime): #determines how long it took the train arriving after startTime to reach every station and returns it as one row data frame
import pandas as pd
timeRow=list(lineStat.index).index(startTime) #finds the row number from lineStat labeled startTime and calls it timeRow
specTrain=pd.concat([pd.DataFrame('-',index=[startTime],columns=['Col']),pd.DataFrame(0,index=[startTime],columns=list(lineStat.columns))], axis=1, join='outer')
while timeRow<len(lineStat.index)-1 and (not isinstance(lineStat.iloc[timeRow][0], str) or len(lineStat.iloc[timeRow][0])<6 or lineStat.iloc[timeRow][0][-3:]!='BRD'): #while timeRow is in bounds and no train is boarding,
timeRow+=1 #go to the next line
skipRows=timeRow-list(lineStat.index).index(startTime) #skipRows is the number of rows to skip the next time it looks for a train
if timeRow>=len(lineStat.index): #if you get to the end,
return [specTrain, skipRows] #just return what you have
specTrain.loc[startTime,'Col']=lineStat.iloc[timeRow][0][:2] #fills in the color, which is stored as the first two letters in the status
    timeDif=lineStat.index[timeRow]-startTime #set timeDif to the difference between arrival at this station and startTime
specTrain.loc[startTime,lineStat.columns[0]]=timeDif.seconds #store timeDif as seconds
for stationNum in range(1,len(lineStat.columns)): #this fills in the difference arrival time for every station
isTrainBoarding=False
while timeRow<(len(lineStat.index)-1) and not isTrainBoarding: #while timeRow is in bounds and the train is not boarding
#The line below says that a train is boarding if either it has status "BRD" or it has status "ARR" and 20 seconds later the station is waiting for a different train
isTrainBoarding=lineStat.iloc[timeRow][stationNum]==(specTrain.loc[startTime,'Col']+":BRD") or (lineStat.iloc[timeRow][stationNum]==(specTrain.loc[startTime,'Col']+":ARR") and (lineStat.iloc[timeRow+1][stationNum][:2]!=specTrain.loc[startTime,'Col']))
timeRow+=1 #go to the next line
if timeRow>=len(lineStat.index)-1: #if you get to the end,
return [specTrain, skipRows] #just return what you have
        timeDif=lineStat.index[timeRow]-startTime #set timeDif to the difference between arrival at this station and startTime
specTrain.loc[startTime,lineStat.columns[stationNum]]=timeDif.seconds #store timeDif as seconds (converted into a string)
if stationNum<len(lineStat.columns)-1: #if you found a trains, go down a certain number of rows before checking the next station
if lineStat.columns[stationNum] in minDist.keys() and lineStat.columns[stationNum+1] in minDist.keys(): #if both stations are in minDist
timeRow+=minDist[lineStat.columns[stationNum]][lineStat.columns[stationNum+1]]['weight'] #go down the number of rows recorded in minDist
else:
timeRow+=2 #if the connection isn't in minDist, go down two rows
if (specTrain.loc[startTime,'Col'].islower() and lineStat.columns[stationNum] in ['A11','B08','E01','K04']) or (specTrain.loc[startTime,'Col']=='Yl' and lineStat.columns[stationNum]=='E05') or (specTrain.loc[startTime,'Col']=='Rd' and lineStat.columns[stationNum]=='A13'):
break
return [specTrain, skipRows]
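# For reference, trainBuild returns [specTrain, skipRows]: specTrain is a one-row
# DataFrame indexed by startTime whose 'Col' column holds the train colour and
# whose station columns hold the seconds elapsed from startTime until the train
# boarded there (left at 0 if it never arrived); skipRows is how many status rows
# to jump before looking for the next arriving train.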
def trainTable(lineStat): #returns a table listing the trains by start time, color and the time they took to reach a given station
import pandas as pd
    [masterTable,rowNum]=trainBuild(lineStat,lineStat.index[0]) #builds the first row and lets it know how many rows to go forward to get to the next train arrival
currentColor=masterTable.iloc[0][0] #record the color of the first train as currentColor
newTrain=masterTable #newTrain just needs to be something for when it's referenced in the if statement
while rowNum<len(lineStat.index):# and newTrain.iloc[0][-1]!=0: #keep going as long as there's data to analyze and each train gets to the end
while rowNum<len(lineStat.index)-1 and lineStat.iloc[rowNum][0]==currentColor+':BRD': #while the train (with currentColor) is boarding,
rowNum+=1 #go to the next row
[newTrain, skipRows]=trainBuild(lineStat,lineStat.index[rowNum]) #once you've gotten to a new train arrival, record it as newTrain and note the rows to skip
masterTable=masterTable.append(newTrain) #append newTrain to the masterTable
        currentColor=masterTable.iloc[-1][0] #change currentColor to the color of the train that just boarded
rowNum+=skipRows+1 #skip ahead to the next train
return masterTable
def lastBRDtime(newTrainBRDtime, lineStat, stationNum): #finds the last time a train boarded at a given station before newTrainBRDtime
import pandas as pd
timeRow=list(lineStat.index).index(newTrainBRDtime)-2 #start with a time two rows before the train reaches the station
isTrainBoarding=False # the next few lines just say keep moving backwards in time until you get to a train board
while timeRow>0 and not isTrainBoarding: #if you haven't hit the beginning and a train isn't boarding
isTrainBoarding=isinstance(lineStat.iloc[timeRow,stationNum], str) and len(lineStat.iloc[timeRow, stationNum])==6 and lineStat.iloc[timeRow,stationNum][-3:]=='BRD' #a train is boarding if it's a string of length 6 with BRD as the last three letters
timeRow-=1
return lineStat.index[timeRow] #return that time
def trainTableIntermediate(lineStat, stationList): #returns a table listing the trains by start time, color and the time they took to reach a given station, with the possibility that a train started at an intermediary station
import pandas as pd
staNumList=[]
for station in stationList: #turn the list of stations into a list of numbers corresponding to the stations' location in lineStat's columns
staNumList.append(list(lineStat.columns).index(station))
    [masterTable,rowNum]=trainBuild(lineStat,lineStat.index[0]) #builds the first row and lets it know how many rows to go forward to get to the next train arrival
currentColor=masterTable.iloc[0][0] #record the color of the first train as currentColor
newTrain=masterTable #newTrain just needs to be something for when it's referenced in the if statement
while rowNum<len(lineStat.index):# and newTrain.iloc[0][-1]!=0: #keep going as long as there's data to analyze and each train gets to the end
while rowNum<len(lineStat.index)-1 and lineStat.iloc[rowNum,0]==currentColor+':BRD': #while the train (with currentColor) is boarding,
rowNum+=1 #go to the next row
[newTrain, skipRows]=trainBuild(lineStat,lineStat.index[rowNum]) #once you've gotten to a new train arrival, record it as newTrain and note the rows to skip
for staNum in staNumList: #for all the intermediary stations in stationList
mostRecentBRDtime=lastBRDtime(newTrain.index[0]+pd.to_timedelta(newTrain.iloc[0,staNum],unit='s'), lineStat, staNum) #find the last train to board at this station
if mostRecentBRDtime>=masterTable.index[-1]+pd.to_timedelta(masterTable.iloc[-1,staNum]+42,unit='s'): #if that train left more than 42 seconds after the last train in the table
intermedTrain=trainBuild(lineStat.iloc[:,staNum:],mostRecentBRDtime)[0] #it's a different train, so figure out how long it took to arrive
for colNum in range(staNum): #for all the stations before the intermediary station,
intermedTrain.insert(colNum+1,lineStat.columns[colNum],0) #insert a column with value 0 and the correct station
masterTable=masterTable.append(intermedTrain) #append the intermediary train to masterTable
break
masterTable=masterTable.append(newTrain) #append newTrain to the masterTable
currentColor=newTrain.iloc[0,0] #change currentColor to the color of the train that just boarded
rowNum+=skipRows+1 #skip ahead to the next train
masterTable.index+=pd.to_timedelta(masterTable.iloc[:,staNum],unit='s') #normalize the index so that the time in each row is when the train arrived at the first station to include all trains, i.e., the last intermediary station
return masterTable
def trainTableMerge(innerTrainTable, outerTrainTable): #this function merges two sets of train tables, where all the stations in innerTrainTable are also in outerTrainTable
import pandas as pd
centralTrainTable=innerTrainTable.loc[lambda df: df.Col>'ZZ'] #only worry about the trains that are lowercase
numLeftStations=list(outerTrainTable.columns).index(centralTrainTable.columns[1]) #numLeftStations is the number of stations in outerTrainTable before the first one in innerTrainTable
numRightStations=len(outerTrainTable.columns)-list(outerTrainTable.columns).index(centralTrainTable.columns[-1])-1 #numRightStations is the number of stations in outerTrainTable after the last one in innerTrainTable
if numLeftStations>1:
for staNum in range(numLeftStations-1): #insert the left stations from outerTrainTable with 0 as the value
centralTrainTable.insert(1+staNum,outerTrainTable.columns[1+staNum],0)
if numRightStations>0:
for staNum in range(-numRightStations,0): #insert the right stations from outerTrainTable with 0 as the value
centralTrainTable.insert(len(centralTrainTable.columns),outerTrainTable.columns[staNum],0)
newTrainTable=pd.concat([outerTrainTable,centralTrainTable]) #join the two tables together
newTrainTable.index+=pd.to_timedelta(newTrainTable.iloc[:,numLeftStations],unit='s') #normalize the index so that the time in each row is when the train arrived at the first station to include all trains, i.e., the last intermediary station
return newTrainTable.sort_index() #return the combined table sorted by when the trains arrived at the first station in innerTrainTable
def allTrainsNE(allLN2NE, surgeNum): #returns all trains heading toward the North and East (Glenmont, Greenbelt, New Carrollton, Largo) as a dictioanry of panda dataframes
GRtrains=trainTable(allLN2NE.loc[:, sGLine+cGYLine+nGYEnd]) #produces all green line trains
if surgeNum in [3,4]: #if it's surge 3 or 4
YLtrains=trainTable(allLN2NE.loc[:, ['C07']+cGYLine+nGYEnd]).loc[lambda df: df.Col.isin(['YL','yl'])] #return yellow line trains starting at the Pentagon
BLtrains=trainTable(allLN2NE.loc[:, ['C07']+BArlCem+SOBLine+SBLine]) #return blue line trains starting at the Pentagon
else:
YLtrains=trainTable(allLN2NE.loc[:,wBEnd+BYLine+cGYLine+nGYEnd]).loc[lambda df: df.Col=='YL'] #otherwise, return yellow line Rush Plus trains from Van Dorn onward
yltrains=trainTable(allLN2NE.loc[:, sYEnd+BYLine+cGYLine+nGYEnd[:6]]).loc[lambda df: df.Col.isin(['Yl','yl'])] #return normal yellow line trains
if surgeNum==2:
BLtrains=trainTable(allLN2NE.loc[:, wBEnd+BYLine]).loc[lambda df: df.Col=='BL'] #for surge 2, return trains that run to the Pentagon
else:
BLtrains=trainTable(allLN2NE.loc[:, wBEnd+BYLine+BArlCem+SOBLine+SBLine]).loc[lambda df: df.Col=='BL'] #otherwise, blue lines run all the way
if surgeNum==2:
SVtrains=trainTable(allLN2NE.loc[:, wSEnd+SOLine+SOBLine[:-3]]) #for surge 2, return trains that run to Eastern Market
ORtrains=trainTable(allLN2NE.loc[:, wOEnd+SOLine+SOBLine[:-3]]) #for surge 2, return trains that run to Eastern Market
else:
SVtrains=trainTable(allLN2NE.loc[:, wSEnd+SOLine+SOBLine+SBLine]) #otherwise, silver line trains run all the way
if surgeNum in [1,5]: #during surges 1 and 5, there are intermediate orange line trains
ORtrains=trainTableIntermediate(allLN2NE.loc[:, wOEnd+SOLine+SOBLine+eOLine], ['K04']) #that start at Ballston (K04)
else:
ORtrains=trainTable(allLN2NE.loc[:, wOEnd+SOLine+SOBLine[:-1]])
RDtrains=trainTableIntermediate(allLN2NE.loc[:,wRedEnd+cRedLine+eRedEnd],['A13','A11']) #for red line trains, produce trains that run all the way and trains that run from Grovesnor to Silver Spring
if surgeNum in [3,4]:
return {'GR':GRtrains,'YL':YLtrains,'BL':BLtrains,'SV':SVtrains,'OR':ORtrains,'RD':RDtrains} #combine them all into a dictionary
else:
return {'GR':GRtrains,'YL':YLtrains,'yl':yltrains,'BL':BLtrains,'SV':SVtrains,'OR':ORtrains,'RD':RDtrains} #combine them all into a dictionary
#note: [::-1] reverses the direction of a list without affecting how it's called later
def allTrainsSW(allLN2SW,surgeNum): #returns all trains heading toward the South and West (Branch Ave, Huntington, Franconia-Springfield, Vienna, Wiehle, Shady Grove) as a dictionary of pandas dataframes
GRtrains=trainTable(allLN2SW.loc[:, (sGLine+cGYLine+nGYEnd)[::-1]]).loc[lambda df: df.Col=='GR'] #produces all green line trains
if surgeNum in [3,4]: #if it's surge 3 or 4
YLtrains=trainTableIntermediate(allLN2SW.loc[:, (['C07']+cGYLine+nGYEnd)[::-1]],['E06','E01']).loc[lambda df: df.Col.isin(['YL','Yl','yl'])] #run the yellow line to the Pentagon
BLtrains=trainTable(allLN2SW.loc[:, (['C07']+BArlCem+SOBLine+SBLine)[::-1]]).loc[lambda df: df.Col=='BL'] #run the blue line to the Pentagon
else:
YLtrains=trainTable(allLN2SW.loc[:, (wBEnd+BYLine+cGYLine+nGYEnd)[::-1]]).loc[lambda df: df.Col=='YL'] #otherwise, produce yellow line Rush Plus trains to Franconia-Springfied
yltrains=trainTableIntermediate(allLN2SW.loc[:, (sYEnd+BYLine+cGYLine+nGYEnd[:6])[::-1]],['E06','E01']).loc[lambda df: df.Col.isin(['yl','YL'])] #return normal yellow line trains
if surgeNum==2:
BLtrains=trainTable(allLN2SW.loc[:, (wBEnd+BYLine)[::-1]])[lambda df: df.Col=='BL'] #for surge 2, produce trains from the Pentagon to Van Dorn
else:
BLtrains=trainTable(allLN2SW.loc[:, (wBEnd+BYLine+BArlCem+SOBLine+SBLine)[::-1]]).loc[lambda df: df.Col=='BL']
if surgeNum==2:
SVtrains=trainTable(allLN2SW.loc[:, (wSEnd+SOLine+SOBLine[:-3])[::-1]]).loc[lambda df: df.Col=='SV'] #for surge 2, produce trains that run to Eastern Market
ORtrains=trainTable(allLN2SW.loc[:, (wOEnd+SOLine+SOBLine[:-3])[::-1]]).loc[lambda df: df.Col=='OR'] #for surge 2, produce trains that run to Eastern Market
else:
SVtrains=trainTable(allLN2SW.loc[:, (wSEnd+SOLine+SOBLine+SBLine)[::-1]]).loc[lambda df: df.Col=='SV'] #produce silver line trains
ORtrains=trainTable(allLN2SW.loc[:, (wOEnd+SOLine+SOBLine+eOLine)[::-1]]) #produce orange line trains
RDtrains=trainTableIntermediate(allLN2SW.loc[:,(wRedEnd+cRedLine+eRedEnd)[::-1]],['B08','B35']) #for red line trains, return trains that run all the way and trains that run from Grovesnor to Silver Spring
if surgeNum in [3,4]:
return {'GR':GRtrains,'YL':YLtrains,'BL':BLtrains,'SV':SVtrains,'OR':ORtrains,'RD':RDtrains} #combine them all into a dictionary
else:
return {'GR':GRtrains,'YL':YLtrains,'yl':yltrains,'BL':BLtrains,'SV':SVtrains,'OR':ORtrains,'RD':RDtrains} #combine them all into a dictionary
def trainTableErrHandling(lineStat): #returns a table listing the trains by start time, color and the time they took to reach a given station, removing trains that arrive at the same time or before the train that started before them
import pandas as pd
    [masterTable,rowNum]=trainBuild(lineStat,lineStat.index[0]) #builds the first row and lets it know how many rows to go forward to get to the next train arrival
currentColor=masterTable.iloc[0][0] #record the color of the first train as currentColor
newTrain=masterTable #newTrain just needs to be something for when it's referenced in the if statement
while rowNum<len(lineStat.index) and newTrain.iloc[0][-1]!=0: #keep going as long as there's data to analyze and each train gets to the end
while rowNum<len(lineStat.index)-1 and lineStat.iloc[rowNum][0]==currentColor+':BRD': #while the train (with currentColor) is boarding,
rowNum+=1 #go to the next row
[newTrain, skipRows]=trainBuild(lineStat,lineStat.index[rowNum]) #once you've gotten to a new train arrival, record it as newTrain and note the rows to skip
if newTrain.iloc[0][-1]==0 or (newTrain.index[0]-masterTable.index[-1]).seconds>int(masterTable.iloc[-1][-1])-int(newTrain.iloc[0][-1]): #if you've reached the end or newTrain arrived at the last station after the last train
masterTable=masterTable.append(newTrain) #append newTrain to the masterTable
else: #but if that's not the case, something went wrong with the last train
masterTable=pd.concat([masterTable.iloc[:][:-1],newTrain]) #replace the last row of masterTable with the data for newTrain
currentColor=masterTable.iloc[-1][0] #exchange currentColor to the color of the train that just boarded
rowNum+=skipRows+1 #skip ahead to the next train
return masterTable
def trainTableSurgeNE(month, dayList, surgeNum): #this code asssembles all the trains moving toward the North and East during a surge
import pandas as pd
isFirst=True
for day in dayList: #for all the days on the list
tempLN=allLNtoNEtable(str(month)+str(day).rjust(2,'0')+'045000',str(month)+str(day).rjust(2,'0')+'101000',surgeNum) #form the lineNext for the morning
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200: #if there's more than 200 lines (over an hour) of data
if isFirst: #if it's the first time you found data
trainsSurge=allTrainsNE(tempLN,surgeNum) #set the data to the trains of that set
isFirst=False
else: #if it's not the first time you found data
tempTrains=allTrainsNE(tempLN,surgeNum)
for color in trainsSurge.keys(): #for each color
trainsSurge[color]=trainsSurge[color].append(tempTrains[color]) #append the new data to the existing data
#this is the same as above, but for the afternoon instead of the morning
tempLN=allLNtoNEtable(str(month)+str(day).rjust(2,'0')+'165000',str(month)+str(day)+'221000',surgeNum)
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200:
if isFirst:
trainsSurge=allTrainsNE(tempLN,surgeNum)
isFirst=False
else:
tempTrains=allTrainsNE(tempLN,surgeNum)
for color in trainsSurge.keys():
trainsSurge[color]=trainsSurge[color].append(tempTrains[color])
return trainsSurge
#this is the same as above, but for trains heading to the South and West instead
def trainTableSurgeSW(month, dayList, surgeNum):
import pandas as pd
isFirst=True
for day in dayList:
tempLN=allLNtoSWtable(str(month)+str(day).rjust(2,'0')+'045000',str(month)+str(day).rjust(2,'0')+'101000',surgeNum)
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200:
if isFirst:
trainsSurge=allTrainsSW(tempLN,surgeNum)
isFirst=False
else:
tempTrains=allTrainsSW(tempLN,surgeNum)
for color in trainsSurge.keys():
trainsSurge[color]=trainsSurge[color].append(tempTrains[color])
tempLN=allLNtoSWtable(str(month)+str(day).rjust(2,'0')+'165000',str(month)+str(day).rjust(2,'0')+'221000',surgeNum)
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200:
if isFirst:
trainsSurge=allTrainsSW(tempLN,surgeNum)
isFirst=False
else:
tempTrains=allTrainsSW(tempLN,surgeNum)
for color in trainsSurge.keys():
trainsSurge[color]=trainsSurge[color].append(tempTrains[color])
return trainsSurge
def saveWMATAtrainSQL(timeList, duration, surgeNum): #saves the data from WMATA and the train instances to the SQL database
import time, pandas as pd
from sqlalchemy import create_engine
for hour2wake in timeList: #run at timeList
while int(time.strftime('%H'))<hour2wake or int(time.strftime('%H'))>timeList[-1]: #while it's not yet time to run
time.sleep(180) #wait 3 minutes
        [allLN2NE,allLN2SW]=WMATAtableSQL(duration,20,surgeNum) #then run for duration period, recording data every 20 seconds
allLN2NE.to_csv('LNtoNE'+time.strftime("%d%H%M%S")+'.csv') #this saves a copy of the lineNext, so I can reconstruct the trainBuild if there's a problem
allLN2SW.to_csv('LNtoSW'+time.strftime("%d%H%M%S")+'.csv') #ditto
trains2NE=allTrainsNE(allLN2NE, surgeNum) #this creates the train dictionary
        trains2SW=allTrainsSW(allLN2SW, surgeNum) #same, but for trains heading south/west
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@<EMAIL>:5432/WmataData') #opens the engine to WmataData
for color in trains2NE.keys(): #for all the colors
if color=='YL': #for rush plus yellow lines, save them as YLP and the surge number
trains2NE['YL'].to_sql('NEtrainsYLP'+str(surgeNum), engine, if_exists='append')
                trains2SW['YL'].to_sql('SWtrainsYLP'+str(surgeNum), engine, if_exists='append')
else: #for other colors, save them by the color and the surge number
trains2NE[color].to_sql('NEtrains'+color+str(surgeNum), engine, if_exists='append')
trains2SW[color].to_sql('SWtrains'+color+str(surgeNum), engine, if_exists='append')
engine.connect().close()
return
def trainData(line,destList,month,dayList): #gets all the train arrival data for a specific line for specific dates
import pandas as pd
isFirst=True
for day in dayList: #for all the days on the list
tempLN=lineNextTableSQL(line,str(month)+str(day).rjust(2,'0')+'045000',str(month)+str(day).rjust(2,'0')+'101000',destList) #form the lineNext for the morning
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200: #if there's more than 200 lines (over an hour) of data
if isFirst: #if it's the first time you found data
trains=trainTable(tempLN) #set the data to the trains of that set
isFirst=False
else: #if it's not the first time you found data
tempTrains=trainTable(tempLN)
trains=trains.append(tempTrains) #append the new data to the existing data
#the code below is the same concept, but for the afternoon
tempLN=lineNextTableSQL(line,str(month)+str(day).rjust(2,'0')+'165000',str(month)+str(day).rjust(2,'0')+'221000',destList) #form the lineNext for the afternoon
if isinstance(tempLN, pd.DataFrame) and len(tempLN.index)>200: #if there's more than 200 lines (over an hour) of data
if isFirst: #if it's the first time you found data
trains=trainTable(tempLN) #set the data to the trains of that set
isFirst=False
else: #if it's not the first time you found data
tempTrains=trainTable(tempLN)
trains=trains.append(tempTrains) #append the new data to the existing data
return trains
def trainDataCSV(fileName): #converts data from a csv file into our standard format
import pandas as pd
rawData=pd.read_csv(fileName) #reads the file
return rawData.rename(index=pd.to_datetime(rawData.iloc[:,0])).drop(rawData.columns[0],axis=1) #renames the indices as the first column (converted into dateTimes), then drops the first column
def getRidersTrans(): #gets the ridership data and the station RTU/name translator
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('postgresql+psycopg2://Original:tolistbtGU!@teamoriginal.ccc95gjlnnnc.us-east-1.rds.amazonaws.com:5432/SHSTUFF')
query1='SELECT * FROM "WMATARidershipExtract";'
ridershipDF=pd.read_sql(query1,engine)
query2='SELECT * FROM "RTUname";'
RTUname=pd.read_sql(query2,engine)
return [ridershipDF,RTUname.rename(index=RTUname.loc[:,'index']).drop('index',axis=1)]
def dateQH(DTinput): #given a panda datetime, it outputs the date and quarter hour string for pulling ridership data
import pandas as pd
startMinute=DTinput.minute-DTinput.minute%15
if startMinute==45:
endHour=DTinput.hour%12+1
endMinute=0
else:
endHour=DTinput.hour%12
endMinute=startMinute+15
if DTinput.hour>11:
ampm=' PM'
else:
ampm=' AM'
QHstring=str(DTinput.hour%12)+':'+str(startMinute).rjust(2,'0')+ampm+' to '+str(endHour)+':'+str(endMinute).rjust(2,'0')+ampm
return [DTinput.date(),QHstring] #returns the date and quarter hour string, e.g., '5:00 PM to 5:15 PM'
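# Worked example (illustrative timestamp): for pd.Timestamp('2016-06-21 17:20:00')
# the enclosing quarter hour runs from 17:15 to 17:30, so dateQH returns
#   [datetime.date(2016, 6, 21), '5:15 PM to 5:30 PM']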
def tripTimes(trainsData, stationSplits): #divides the train trips for the purpose of predictions, splitting the trains at the stations in stationSplits
import pandas as pd
[ridershipDF,stationTrans]=getRidersTrans() #gets the ridership data and station name/translator
#the next few lines creates the columns: leg1riders, leg2riders,... lastLegRiders,leg1,leg2,...lastLeg
legList=['lastLeg']
riderList=['lastLegRiders']
for num in range(len(stationSplits),0,-1):
legList.insert(0,'leg'+str(num))
riderList.insert(0,'leg'+str(num)+'Riders')
trips=pd.DataFrame(0,index=trainsData.index,columns=riderList+legList)
#the stations are the first station, the station splits and the last station
stations=[trainsData.columns[1]]+stationSplits+[trainsData.columns[-1]]
for row in trainsData.index: #for all the rows
[date,QHstring]=dateQH(row) #gets the date and quarter hour
for colNum in range(len(legList)): # and columns
trips.loc[row,legList[colNum]]=trainsData.loc[row,stations[colNum+1]]-trainsData.loc[row,stations[colNum]] #make the trip elements the difference between the station arrival data
legStations=(trainsData.loc[:,stations[colNum]:stations[colNum+1]]).columns #legStations are the station in this leg
trips.loc[row,legList[colNum]+'Riders']=ridershipDF[lambda df:df.station.isin(stationTrans.loc[legStations,'Station Name'])][lambda df:df.dateday==date][lambda df:df.hour_interval==QHstring].entries.sum()
return trips
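# The frame built above has one row per train and paired columns legN/legNRiders
# (plus lastLeg/lastLegRiders): each legN is the travel time in seconds between
# consecutive split stations, and legNRiders is the summed faregate entries for
# the stations on that leg during the quarter hour in which the train started.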
def headerTimes(trainsData): #turns the date/time information into numbers that machine learning tools can use
import pandas as pd
headerInfo=pd.DataFrame(0,index=trainsData.index,columns=['secSince5','weekday','evening','lastTrain','lastTrainColor'])
for time in trainsData.index:
headerInfo.loc[time,'weekday']=time.weekday()
if time.hour>11: #if it's past 11
headerInfo.loc[time,'evening']=1 #set evening to 1
headerInfo.loc[time,'secSince5']=(time-pd.to_datetime('2016-'+str(time.month)+'-'+str(time.day)+' 17:00')).seconds #set secSince5 to seconds since 5pm
else:
headerInfo.loc[time,'secSince5']=(time-pd.to_datetime('2016-'+str(time.month)+'-'+str(time.day)+' 5:00')).seconds #set secSince5 to seconds since 5am
for timeRow in range(1,len(headerInfo.index)):
headerInfo.iloc[timeRow,3]=(headerInfo.index[timeRow]-headerInfo.index[timeRow-1]).seconds
for color in trainsData.Col.value_counts().index:
if color>'-':
for timeRow in range(1,len(trainsData[lambda df:df.Col==color].index)):
headerInfo.loc[trainsData[lambda df:df.Col==color].index[timeRow],'lastTrainColor']=(trainsData[lambda df:df.Col==color].index[timeRow]-trainsData[lambda df:df.Col==color].index[timeRow-1]).seconds
return headerInfo
def trainTestSet(oldTrainsData, stationSplits):
import pandas as pd, numpy as ny
trainsData=oldTrainsData.reset_index().drop_duplicates(subset='index', keep='last').set_index('index').sort_index() #removes duplicated index
tripTimeTable=tripTimes(trainsData, stationSplits)
headerInfo= headerTimes(trainsData)
    secSince5B4=pd.Series(headerInfo.secSince5.values[:-1],index=trainsData.index[1:],name='secSince5B4') #.values makes the one-row shift positional rather than index-aligned
tripB4Table=tripTimeTable.iloc[1:] #this line and the next few lines make a new table with the data from the trip before
    tripB4Table=tripB4Table.rename(columns=lambda name:name+'B4') #append a 'B4' suffix to the column names
for rowNum in range(len(tripB4Table.index)):
for colNum in range(len(tripB4Table.columns)):
tripB4Table.iloc[rowNum,colNum]=tripTimeTable.iloc[rowNum,colNum]
colorSeries=trainsData.iloc[1:,0].map({'OR':1,'or':2,'SV':3,'BL':5,'yl':10,'Yl':11,'YL':12,'GR':15,'RD':20,'rd':21}) #remapping strings into numbers using the subjective numbering system I just made up
stations2NE=[] #list of stations in order from south/west to north/east
for line in lineList[0]:
stations2NE+=line
NEdirection=pd.Series(ny.sign(stations2NE.index(trainsData.columns[-1])-stations2NE.index(trainsData.columns[1])),index=trainsData.index, name='NEdirection') #NE direction is 1 if the train is headed to the north or east, -1 otherwise
    testTable= pd.concat([colorSeries, NEdirection, headerInfo.iloc[1:],secSince5B4,tripB4Table,tripTimeTable.iloc[1:]],axis=1)
    return testTable
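# Hedged usage sketch for trainTestSet: `arrivalsDF` stands for the train-arrival DataFrame
# built earlier in this project (timestamps as index, stations as columns, a 'Col' line-colour
# column), and the split-station name below is only a placeholder.
def _demo_trainTestSet(arrivalsDF):
    return trainTestSet(arrivalsDF, ['Metro Center'])  # one intermediate split station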
# -*- coding: utf-8 -*-
"""Core logic for computing subtrees."""
# standard library imports
import contextlib
import os
import sys
from collections import Counter
from collections import OrderedDict
from itertools import chain
from itertools import combinations
from pathlib import Path
# third-party imports
import networkx as nx
import numpy as np
import pandas as pd
from Bio import SeqIO
# first-party imports
import sh
# module imports
from .common import CLUSTER_HIST_FILE
from .common import NAME
from .common import SEARCH_PATHS
from .common import cluster_set_name
from .common import fasta_records
from .common import get_paths_from_file
from .common import homo_degree_dist_filename
from .common import logger
from .common import protein_properties_filename
from .common import write_tsv_or_parquet
from .protein import Sanitizer
# global constants
STATFILE_SUFFIX = f"-{NAME}_stats.tsv"
ANYFILE_SUFFIX = f"-{NAME}_ids-any.tsv"
ALLFILE_SUFFIX = f"-{NAME}_ids-all.tsv"
CLUSTFILE_SUFFIX = f"-{NAME}_clusts.tsv"
SEQ_FILE_TYPE = "fasta"
UNITS = {
"Mb": {"factor": 1, "outunits": "MB"},
"Gb": {"factor": 1024, "outunits": "MB"},
"s": {"factor": 1, "outunits": "s"},
"m": {"factor": 60, "outunits": "s"},
"h": {"factor": 3600, "outunits": "s"},
}
SEQ_IN_LINE = 6
IDENT_STATS_LINE = 7
FIRST_LOG_LINE = 14
LAST_LOG_LINE = 23
STAT_SUFFIXES = ["size", "mem", "time", "memory"]
RENAME_STATS = {
"throughput": "throughput_seq_s",
"time": "CPU_time",
"max_size": "max_cluster_size",
"avg_size": "avg_cluster_size",
"min_size": "min_cluster_size",
"seqs": "unique_seqs",
"singletons": "singleton_clusters",
}
ID_SEPARATOR = "."
IDENT_LOG_MIN = -3
IDENT_LOG_MAX = 0
FASTA_EXT_LIST = [".faa", ".fa", ".fasta"]
FAA_EXT = "faa"
# helper functions
def read_synonyms(filepath):
"""Read a file of synonymous IDs into a dictionary."""
synonym_dict = {}
try:
synonym_frame = pd.read_csv(filepath, sep="\t")
except FileNotFoundError:
logger.error(f'Synonym tsv file "{filepath}" does not exist')
sys.exit(1)
except pd.errors.EmptyDataError:
logger.error(f'Synonym tsv "{filepath}" is empty')
sys.exit(1)
if len(synonym_frame) > 0:
if "#file" in synonym_frame:
synonym_frame.drop("#file", axis=1, inplace=True)
        key = list(
            {"Substr", "Dups"}.intersection(set(synonym_frame.columns))
        )[0]
for group in synonym_frame.groupby("id"):
synonym_dict[group[0]] = group[1][key]
return synonym_dict
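# Sketch of the synonym TSV layout that read_synonyms() expects, inferred from the code above
# rather than from upstream documentation: a tab-separated file with an "id" column plus a
# "Substr" or "Dups" column (an optional "#file" column is dropped), e.g.
#
#   id      Dups
#   gene1   gene1_copyA
#   gene1   gene1_copyB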
def parse_usearch_log(filepath, rundict):
"""Parse the usearch log file into a stats dictionary."""
with filepath.open() as logfile:
for lineno, line in enumerate(logfile):
if lineno < FIRST_LOG_LINE:
if lineno == SEQ_IN_LINE:
split = line.split()
rundict["seqs_in"] = int(split[0])
rundict["singleton_seqs_in"] = int(split[4])
if lineno == IDENT_STATS_LINE:
split = line.split()
rundict["max_identical_seqs"] = int(split[6].rstrip(","))
rundict["avg_identical_seqs"] = float(split[8])
continue
if lineno > LAST_LOG_LINE:
break
split = line.split()
if split:
stat = split[0].lower()
if split[1] in STAT_SUFFIXES:
stat += "_" + split[1]
val = split[2]
else:
val = split[1].rstrip(",")
# rename poorly-named stats
stat = RENAME_STATS.get(stat, stat)
# strip stats with units at the end
conversion_factor = 1
for unit in UNITS:
if val.endswith(unit):
val = val.rstrip(unit)
conversion_factor = UNITS[unit]["factor"]
stat += "_" + UNITS[unit]["outunits"]
break
# convert string values to int or float where possible
try:
val = int(val)
val *= conversion_factor
except ValueError:
try:
val = float(val)
val *= conversion_factor
except ValueError:
pass
rundict[stat] = val
@contextlib.contextmanager
def in_working_directory(path):
"""Change working directory and return to previous wd on exit."""
original_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(original_cwd)
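# Small illustrative helper (not part of the original module) showing the intended use of
# in_working_directory(); `workdir` is a caller-supplied pathlib.Path.
def _demo_in_working_directory(workdir):
    with in_working_directory(workdir):
        # relative paths resolve against workdir; the previous cwd is restored on exit
        return Path("output.txt").resolve()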
def get_fasta_ids(fasta):
"""Get the IDS from a FASTA file."""
idset = set()
with fasta.open() as fasta_fh:
for line in fasta_fh:
if line.startswith(">"):
idset.add(line.split()[0][1:])
return list(idset)
def parse_chromosome(ident):
"""Parse chromosome identifiers."""
# If ident contains an underscore, work on the
# last part only (e.g., MtrunA17_Chr4g0009691)
undersplit = ident.split("_")
if len(undersplit) > 1:
ident = undersplit[-1].upper()
if ident.startswith("CHR"):
ident = ident[3:]
# Chromosome numbers are integers suffixed by 'G'
try:
chromosome = "Chr" + str(int(ident[: ident.index("G")]))
except ValueError:
chromosome = None
return chromosome
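# Illustrative identifiers for parse_chromosome (example strings in the style the parser
# targets, not tied to a specific genome release):
#   parse_chromosome("MtrunA17_Chr4g0009691")  ->  "Chr4"
#   parse_chromosome("scaffold_123")           ->  None   (no integer before a "G")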
def parse_subids(ident):
"""Parse the subidentifiers from identifiers."""
subids = ident.split(ID_SEPARATOR)
subids += [
chromosome
for chromosome in [parse_chromosome(ident) for ident in subids]
if chromosome is not None
]
return subids
def parse_clusters(outdir, delete=True, count_clusters=True, synonyms=None):
"""Parse clusters, counting occurrances."""
if synonyms is None:
synonyms = {}
cluster_list = []
id_list = []
degree_list = []
size_list = []
degree_counter = Counter()
any_counter = Counter()
all_counter = Counter()
graph = nx.Graph()
for fasta in outdir.glob("*"):
cluster_id = int(fasta.name)
ids = get_fasta_ids(fasta)
if len(synonyms) > 0:
syn_ids = set(ids).intersection(synonyms.keys())
for i in syn_ids:
ids.extend(synonyms[i])
n_ids = len(ids)
degree_list.append(n_ids)
degree_counter.update({n_ids: 1})
id_list += ids
cluster_list += [cluster_id] * n_ids
size_list += [n_ids] * n_ids
# Do 'any' and 'all' counters
id_counter = Counter()
id_counter.update(
chain.from_iterable(
[parse_subids(cluster_id) for cluster_id in ids]
)
)
if count_clusters:
any_counter.update(id_counter.keys())
all_counter.update(
[
cluster_id
for cluster_id in id_counter.keys()
if id_counter[cluster_id] == n_ids
]
)
elif n_ids > 1:
any_counter.update({s: n_ids for s in id_counter.keys()})
all_counter.update(
{
cluster_id: n_ids
for cluster_id in id_counter.keys()
if id_counter[cluster_id] == n_ids
}
)
# Do graph components
graph.add_nodes_from(ids)
if n_ids > 1:
edges = combinations(ids, 2)
graph.add_edges_from(edges, weight=n_ids)
if delete:
fasta.unlink()
if delete:
outdir.rmdir()
return (
graph,
cluster_list,
id_list,
size_list,
degree_list,
degree_counter,
any_counter,
all_counter,
)
def prettyprint_float(val, digits):
"""Print a floating-point value in a nice way."""
format_string = "%." + f"{digits:d}" + "f"
return (format_string % val).rstrip("0").rstrip(".")
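# Quick illustrations of prettyprint_float (pure string formatting, no external assumptions):
#   prettyprint_float(0.250, 2)  -> "0.25"
#   prettyprint_float(95.0, 2)   -> "95"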
def homology_cluster(
seqfile,
identity,
delete=True,
write_ids=False,
do_calc=True,
min_id_freq=0,
substrs=None,
dups=None,
cluster_stats=True,
outname=None,
click_loguru=None,
):
"""Cluster at a global sequence identity threshold."""
try:
usearch = sh.Command("usearch", search_paths=SEARCH_PATHS)
except sh.CommandNotFound:
logger.error("usearch must be installed first.")
sys.exit(1)
try:
inpath, dirpath = get_paths_from_file(seqfile)
except FileNotFoundError:
logger.error(f'Input file "{seqfile}" does not exist!')
sys.exit(1)
stem = inpath.stem
dirpath = inpath.parent
if outname is None:
outname = cluster_set_name(stem, identity)
outdir = f"{outname}/"
logfile = f"{outname}.log"
outfilepath = dirpath / outdir
logfilepath = dirpath / logfile
histfilepath = dirpath / homo_degree_dist_filename(outname)
gmlfilepath = dirpath / f"{outname}.gml"
statfilepath = dirpath / f"{outname}-stats.tsv"
anyfilepath = dirpath / f"{outname}-anyhist.tsv"
allfilepath = dirpath / f"{outname}-allhist.tsv"
idpath = dirpath / f"{outname}-ids.tsv"
if identity == 0.0:
identity_string = "Minimum"
else:
identity_string = f"{prettyprint_float(identity *100, 2)}%"
logger.info(f'{identity_string} sequence identity cluster "{outname}":')
if not delete:
logger.debug(f"Cluster files will be kept in {logfile} and {outdir}")
if cluster_stats and write_ids:
logger.debug(
f"File of cluster ID usage will be written to {anyfilepath} and"
f" {allfilepath}"
)
if not do_calc:
if not logfilepath.exists():
logger.error("Previous results must exist, rerun with --do_calc")
sys.exit(1)
logger.debug("Using previous results for calculation")
if min_id_freq:
logger.debug(
"Minimum number of times ID's must occur to be counted:"
f" {min_id_freq}"
)
synonyms = {}
if substrs is not None:
logger.debug(f"using duplicates in {dirpath / dups}")
synonyms.update(read_synonyms(dirpath / substrs))
if dups is not None:
logger.debug(f"using duplicates in {dirpath/dups}")
synonyms.update(read_synonyms(dirpath / dups))
click_loguru.elapsed_time("Clustering")
if do_calc:
#
# Delete previous results, if any.
#
if outfilepath.exists() and outfilepath.is_file():
outfilepath.unlink()
elif outfilepath.exists() and outfilepath.is_dir():
for file in outfilepath.glob("*"):
file.unlink()
else:
outfilepath.mkdir()
#
# Do the calculation.
#
with in_working_directory(dirpath):
output = usearch(
[
"-cluster_fast",
seqfile,
"-id",
identity,
"-clusters",
outdir,
"-log",
logfile,
]
)
logger.debug(output)
run_stat_dict = OrderedDict([("divergence", 1.0 - identity)])
parse_usearch_log(logfilepath, run_stat_dict)
run_stats = pd.DataFrame(
list(run_stat_dict.items()), columns=["stat", "val"]
)
run_stats.set_index("stat", inplace=True)
write_tsv_or_parquet(run_stats, statfilepath)
if delete:
logfilepath.unlink()
if not cluster_stats:
file_sizes = []
file_names = []
record_counts = []
logger.debug("Ordering clusters by number of records and size.")
for fasta_path in outfilepath.glob("*"):
records, size = fasta_records(fasta_path)
if records == 1:
fasta_path.unlink()
continue
file_names.append(fasta_path.name)
file_sizes.append(size)
record_counts.append(records)
file_frame = pd.DataFrame(
list(zip(file_names, file_sizes, record_counts)),
columns=["name", "size", "seqs"],
)
file_frame.sort_values(
by=["seqs", "size"], ascending=False, inplace=True
)
file_frame["idx"] = range(len(file_frame))
for unused_id, row in file_frame.iterrows():
(outfilepath / row["name"]).rename(
outfilepath / f'{row["idx"]}.fa'
)
file_frame.drop(["name"], axis=1, inplace=True)
file_frame.set_index("idx", inplace=True)
# write_tsv_or_parquet(file_frame, "clusters.tsv")
# cluster histogram
cluster_hist = pd.DataFrame(file_frame["seqs"].value_counts())
cluster_hist.rename(columns={"seqs": "clusters"}, inplace=True)
cluster_hist.index.name = "n"
cluster_hist.sort_index(inplace=True)
total_seqs = sum(file_frame["seqs"])
n_clusters = len(file_frame)
cluster_hist["pct_clusts"] = (
cluster_hist["clusters"] * 100.0 / n_clusters
)
cluster_hist["pct_seqs"] = (
cluster_hist["clusters"] * cluster_hist.index * 100.0 / total_seqs
)
cluster_hist.to_csv(CLUSTER_HIST_FILE, sep="\t", float_format="%06.3f")
return n_clusters, run_stats, cluster_hist
(
cluster_graph,
clusters,
ids,
sizes,
unused_degrees,
degree_counts,
any_counts,
all_counts,
) = parse_clusters( # pylint: disable=unused-variable
outfilepath, delete=delete, synonyms=synonyms
)
#
# Write out list of clusters and ids.
#
id_frame = pd.DataFrame.from_dict(
{
"id": ids,
"hom.cluster": pd.array(clusters, dtype=pd.UInt32Dtype()),
"siz": sizes,
}
)
id_frame.sort_values("siz", ascending=False, inplace=True)
id_frame = id_frame.reindex(
["hom.cluster", "siz", "id"],
axis=1,
)
id_frame.reset_index(inplace=True)
id_frame.drop(["index"], axis=1, inplace=True)
id_frame.to_csv(idpath, sep="\t")
del ids, clusters, sizes, id_frame
click_loguru.elapsed_time("graph")
#
# Write out degree distribution.
#
cluster_hist = pd.DataFrame(
list(degree_counts.items()), columns=["degree", "clusters"]
)
cluster_hist.sort_values(["degree"], inplace=True)
cluster_hist.set_index("degree", inplace=True)
total_clusters = cluster_hist["clusters"].sum()
cluster_hist["pct_total"] = (
cluster_hist["clusters"] * 100.0 / total_clusters
)
cluster_hist.to_csv(histfilepath, sep="\t", float_format="%06.3f")
del degree_counts
#
# Do histograms of "any" and "all" id usage in cluster
#
hist_value = f"{identity:f}"
any_hist = pd.DataFrame(
list(any_counts.items()), columns=["id", hist_value]
)
any_hist.set_index("id", inplace=True)
any_hist.sort_values(hist_value, inplace=True, ascending=False)
all_hist = pd.DataFrame(
list(all_counts.items()), columns=["id", hist_value]
)
all_hist.set_index("id", inplace=True)
all_hist.sort_values(hist_value, inplace=True, ascending=False)
if min_id_freq:
any_hist = any_hist[any_hist[hist_value] > min_id_freq]
all_hist = all_hist[all_hist[hist_value] > min_id_freq]
if write_ids:
any_hist.to_csv(anyfilepath, sep="\t")
all_hist.to_csv(allfilepath, sep="\t")
#
# Compute cluster stats
#
# degree_sequence = sorted([d for n, d in cluster_graph.degree()], reverse=True)
# degreeCount = Counter(degree_sequence)
# degree_hist = pd.DataFrame(list(degreeCount.items()),
# columns=['degree', 'count'])
# degree_hist.set_index('degree', inplace=True)
# degree_hist.sort_values('degree', inplace=True)
# degree_hist.to_csv(histfilepath, sep='\t')
nx.write_gml(cluster_graph, gmlfilepath)
click_loguru.elapsed_time("final")
return run_stats, cluster_graph, cluster_hist, any_hist, all_hist
def cluster_in_steps(seqfile, steps, min_id_freq=0, substrs=None, dups=None):
"""Cluster in steps from low to 100% identity."""
try:
inpath, dirpath = get_paths_from_file(seqfile)
except FileNotFoundError:
logger.error('Input file "%s" does not exist!', seqfile)
sys.exit(1)
stat_path = dirpath / (inpath.stem + STATFILE_SUFFIX)
any_path = dirpath / (inpath.stem + ANYFILE_SUFFIX)
all_path = dirpath / (inpath.stem + ALLFILE_SUFFIX)
logsteps = [1.0] + list(
1.0 - np.logspace(IDENT_LOG_MIN, IDENT_LOG_MAX, num=steps)
)
min_fmt = prettyprint_float(min(logsteps) * 100.0, 2)
max_fmt = prettyprint_float(max(logsteps) * 100.0, 2)
logger.info(
f"Clustering at {steps} levels from {min_fmt}% to {max_fmt}% global"
" sequence identity"
)
stat_list = []
all_frames = []
any_frames = []
for id_level in logsteps:
(
stats,
unused_graph,
unused_hist,
any_,
all_,
) = homology_cluster( # pylint: disable=unused-variable
seqfile,
id_level,
min_id_freq=min_id_freq,
substrs=substrs,
dups=dups,
)
stat_list.append(stats)
any_frames.append(any_)
all_frames.append(all_)
logger.info(f"Collating results on {seqfile}.")
#
# Concatenate and write stats
#
    stats = pd.DataFrame(stat_list)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import random as rm
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import os
import matplotlib.colors as colors
import matplotlib
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
######################################################################################
## ---------------------READING THE CONDITIONAL PROBABILITY ARRAYS---------------- ##
######################################################################################
Prob_Condicional_morning_JJA= np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_MorningJJAmanianaCon.npy')
Prob_Condicional_noon_JJA = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_NoonJJAnoonCon.npy')
Prob_Condicional_tarde_JJA = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_TardeJJAtardeCon.npy')
Prob_Condicional_morning_SON= np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_MorningSONmanianaCon.npy')
Prob_Condicional_noon_SON = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_NoonSONnoonCon.npy')
Prob_Condicional_tarde_SON = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_TardeSONtardeCon.npy')
Prob_Condicional_morning_DEF= np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_MorningDEFmanianaCon.npy')
Prob_Condicional_noon_DEF = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_NoonDEFnoonCon.npy')
Prob_Condicional_tarde_DEF = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_TardeDEFtardeCon.npy')
Prob_Condicional_morning_MAM= np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_MorningMAMmanianaCon.npy')
Prob_Condicional_noon_MAM = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_NoonMAMnoonCon.npy')
Prob_Condicional_tarde_MAM = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Mark_Trans_TardeMAMtardeCon.npy')
######################################################################################
## ------------SIMULATION OF THE REFLECTANCES WITH THE MARKOV CHAINS--------------- ##
######################################################################################
import random as rm
def Markov_forecast(initial_state, steps, transitionMatrix):
"""
Do Markov chain forecasting
INPUTS
initial_state : The initial state
steps : number of predictions
    transitionMatrix : square matrix of state-transition probabilities (each row sums to 1)
OUTPUTS
states : array of forecasting states
"""
states = np.zeros((steps), dtype=int)
n = transitionMatrix.shape[0]
s = np.arange(1,n+1)
states[0] = initial_state
for i in range(steps-1):
states[i+1] = np.random.choice(s,p=transitionMatrix[states[i]-1])
return states
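# Self-contained check of Markov_forecast with a made-up 2-state transition matrix
# (each row must sum to 1); the 5-state matrices loaded above drive the real runs below.
def _demo_markov_forecast():
    toy_matrix = np.array([[0.9, 0.1],
                           [0.5, 0.5]])
    return Markov_forecast(1, 5, toy_matrix)  # e.g. array([1, 1, 1, 2, 1]); output is random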
EI_List = [1, 2, 3, 4, 5]
steps_time = 10 ##--> 2 and a half hours; the final array has this many 15-minute steps
Simu_E1_JJA_Morning = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_morning_JJA/100)
Simu_E1_JJA_Noon = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_noon_JJA /100)
Simu_E1_JJA_Tarde = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_tarde_JJA /100)
Simu_E1_MAM_Morning = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_morning_MAM/100)
Simu_E1_MAM_Noon = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_noon_MAM /100)
Simu_E1_MAM_Tarde = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_tarde_MAM /100)
Simu_E1_SON_Morning = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_morning_SON/100)
Simu_E1_SON_Noon = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_noon_SON /100)
Simu_E1_SON_Tarde = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_tarde_SON /100)
Simu_E1_DEF_Morning = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_morning_DEF/100)
Simu_E1_DEF_Noon = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_noon_DEF /100)
Simu_E1_DEF_Tarde = Markov_forecast(EI_List[0], steps_time, Prob_Condicional_tarde_DEF /100)
###------------------------PLOT OF THE SIMULATIONS-------------------------------##
Maniana_time = pd.date_range("06:00", "08:30", freq="15min").time[1:]
Noon_time = pd.date_range("10:00", "12:30", freq="15min").time[1:]
Tarde_time = pd.date_range("15:00", "17:30", freq="15min").time[1:]
Maniana_time = [now.strftime("%H:%M") for now in Maniana_time]
Noon_time = [now.strftime("%H:%M") for now in Noon_time]
Tarde_time = [now.strftime("%H:%M") for now in Tarde_time]
fig = plt.figure(figsize=[14, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1,3,1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.plot(np.arange(0,10,1), Simu_E1_JJA_Morning, color='#b2df8a', label = 'JJA')
ax1.plot(np.arange(0,10,1), Simu_E1_DEF_Morning, color='#33a02c', label = 'DEF')
ax1.plot(np.arange(0,10,1), Simu_E1_MAM_Morning, color='#a6cee3', label = 'MAM')
ax1.plot(np.arange(0,10,1), Simu_E1_SON_Morning, color='#1f78b4', label = 'SON')
ax1.scatter(np.arange(0,10,1), Simu_E1_JJA_Morning, marker='.', color = '#b2df8a', s=30)
ax1.scatter(np.arange(0,10,1), Simu_E1_DEF_Morning, marker='.', color = '#33a02c', s=30)
ax1.scatter(np.arange(0,10,1), Simu_E1_MAM_Morning, marker='.', color = '#a6cee3', s=30)
ax1.scatter(np.arange(0,10,1), Simu_E1_SON_Morning, marker='.', color = '#1f78b4', s=30)
ax1.set_title(u'Simulación de irradiancia 2h 15 min \n adelante en la mañana', fontproperties=prop, fontsize = 14)
ax1.set_xticks(np.arange(0,len(Maniana_time), 1), minor=False)
ax1.set_xticklabels(np.array(Maniana_time), minor=False, rotation = 23)
ax1.set_ylabel(u'Estado', fontproperties=prop_1, fontsize = 13)
ax1.set_xlabel(u'Tiempo', fontproperties=prop_1, fontsize = 13)
ax1.set_ylim(0.5, 5.5)
ax1.set_yticks(np.arange(1,6,1), minor=False)
ax1.grid(which='major', linestyle=':', linewidth=0.5, alpha=0.7)
#ax1.legend()
ax2 = fig.add_subplot(1,3,2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.plot(np.arange(0,10,1), Simu_E1_JJA_Noon, color='#b2df8a', label = 'JJA')
ax2.plot(np.arange(0,10,1), Simu_E1_DEF_Noon, color='#33a02c', label = 'DEF')
ax2.plot(np.arange(0,10,1), Simu_E1_MAM_Noon, color='#a6cee3', label = 'MAM')
ax2.plot(np.arange(0,10,1), Simu_E1_SON_Noon, color='#1f78b4', label = 'SON')
ax2.scatter(np.arange(0,10,1), Simu_E1_JJA_Noon, marker='.', color = '#b2df8a', s=30)
ax2.scatter(np.arange(0,10,1), Simu_E1_DEF_Noon, marker='.', color = '#33a02c', s=30)
ax2.scatter(np.arange(0,10,1), Simu_E1_MAM_Noon, marker='.', color = '#a6cee3', s=30)
ax2.scatter(np.arange(0,10,1), Simu_E1_SON_Noon, marker='.', color = '#1f78b4', s=30)
ax2.set_title(u'Simulación de irradiancia 2h 15 min \n adelante al medio dia', fontproperties=prop, fontsize = 14)
ax2.set_xticks(np.arange(0,len(Noon_time), 1), minor=False)
ax2.set_xticklabels(np.array(Noon_time), minor=False, rotation = 23)
ax2.set_ylabel(u'Estado', fontproperties=prop_1, fontsize = 13)
ax2.set_xlabel(u'Tiempo', fontproperties=prop_1, fontsize = 13)
ax2.set_ylim(0.5, 5.5)
ax2.set_yticks(np.arange(1,6,1), minor=False)
ax2.grid(which='major', linestyle=':', linewidth=0.5, alpha=0.7)
#ax2.legend()
ax3 = fig.add_subplot(1,3,3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.plot(np.arange(0,10,1), Simu_E1_JJA_Tarde, color='#b2df8a', label = 'JJA')
ax3.plot(np.arange(0,10,1), Simu_E1_DEF_Tarde, color='#33a02c', label = 'DEF')
ax3.plot(np.arange(0,10,1), Simu_E1_MAM_Tarde, color='#a6cee3', label = 'MAM')
ax3.plot(np.arange(0,10,1), Simu_E1_SON_Tarde, color='#1f78b4', label = 'SON')
ax3.scatter(np.arange(0,10,1), Simu_E1_JJA_Tarde, marker='.', color = '#b2df8a', s=30)
ax3.scatter(np.arange(0,10,1), Simu_E1_DEF_Tarde, marker='.', color = '#33a02c', s=30)
ax3.scatter(np.arange(0,10,1), Simu_E1_MAM_Tarde, marker='.', color = '#a6cee3', s=30)
ax3.scatter(np.arange(0,10,1), Simu_E1_SON_Tarde, marker='.', color = '#1f78b4', s=30)
ax3.set_title(u'Simulación de irradiancia 2h 15 min \n adelante en la tarde', fontproperties=prop, fontsize = 14)
ax3.set_xticks(np.arange(0,len(Tarde_time), 1), minor=False)
ax3.set_xticklabels(np.array(Tarde_time), minor=False, rotation = 23)
ax3.set_ylabel(u'Estado', fontproperties=prop_1, fontsize = 13)
ax3.set_xlabel(u'Tiempo', fontproperties=prop_1, fontsize = 13)
ax3.set_ylim(0.5, 5.5)
ax3.set_yticks(np.arange(1,6,1), minor=False)
ax3.grid(which='major', linestyle=':', linewidth=0.5, alpha=0.7)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/Simulation_States_Radiacion.pdf', format = 'pdf')
plt.close('all')
os.system('scp /home/nacorreasa/Escritorio/Figuras/Simulation_States_Radiacion.pdf [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
################################################################################
##--------------------READING THE ORIGINAL DATA ------------------------------##
################################################################################
df = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df = df[df['radiacion'] > 0]
df = df[(df['NI'] >= 0) & (df['strength'] >= 0)& (df['strength'] <= 80)]
#df = df[df['calidad']<100]
df.index = pd.to_datetime(df.index, format="%Y-%m-%d %H:%M", errors='coerce')
df = df.between_time('06:00', '17:00')
Histo_pot, Bins_pot = np.histogram(df['strength'][np.isfinite(df['strength'])] ,
bins= df['strength'].quantile([.1, .5, .7, .75, .8, .85,.9, 0.95, 0.98, 0.99, 1]).values ,
density=True)
###############################################################################################
##--FORECAST OF THE POWER DISTRIBUTION FROM THE STATE-TO-POWER RELATION AT THE BIN EDGES --- ##
###############################################################################################
def FDP_Simulations(Simulacion, df, field, trimestre, etapa, ajuste):
"""
    It obtains the bivariate probability histogram of a variable,
    conditioned on the simulated state sequence. For each state it takes the power
    range implied by the solar-radiation bin edges, over the 10 simulation time steps.
    INPUTS
    Simulacion : Array with the simulated states
    df         : DataFrame with the original values for computing the histogram
    field      : Str, name of the column of interest in the DataFrame
    trimestre  : 0 DEF, 1 MAM, 2 JJA, 3 SON
    etapa      : 0 Morning, 1 Noon, 2 Afternoon
    ajuste     : 0 linear, 1 quadratic, 2 cubic
OUTPUTS
Histos : 2D array with FDP for each simulation time
"""
Coeficientes = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/CoeficientesAjuste.npy')
Coef = Coeficientes[trimestre, etapa, ajuste]
Ref_Bins_dwn = [ 1, 271, 542, 813, 1084]
Ref_Bins_abv = [ 271, 542, 813, 1084, 1355]
Histos=[]
for j in range(len(Simulacion)):
for i in range(len(Ref_Bins_dwn)):
if i+1 == Simulacion[j]:
Pot_dwn = (Ref_Bins_dwn[i]**Coef[2])*(np.e**Coef[3])
Pot_abv = (Ref_Bins_abv[i]**Coef[2])*(np.e**Coef[3])
print(i+1)
if Pot_abv > 0 and Pot_abv <1:
Pot_abv = 1
if Pot_dwn <0 :
Pot_dwn = 0
if Pot_abv > 80 :
Pot_abv = 80
else:
pass
df_temp = df[(df[field].values >= Pot_dwn) & (df[field].values <= Pot_abv)]
Histo_temp, Bins_temp = np.histogram(df_temp[field][np.isfinite(df_temp[field])] , bins=Bins_pot, density=True)
Histos.append(Histo_temp)
else:
pass
Histos = np.array(Histos).T
return Histos
# def FDP_Simulations(Simulacion, df, field):
# """
# It obtains the bivariate histogram of probability of a variable,
# based on the states siutation. It takes the bins down and above
# of solar radiation. It has 20 bins and 10 steps of time
#
# INPUTS
# Simulacion : Array with the simulated states
# df : DataFrame with the original values for computting the histogram
# field : Str of field name of interes of the DataFrame with the data
#
# OUTPUTS
# Histos : 2D array with FDP for each simulation time
# """
# Ref_Bins_dwn = [ 1, 271, 542, 813, 1084]
# Ref_Bins_abv = [ 271, 542, 813, 1084, 1355]
# Histos=[]
# for j in range(len(Simulacion)):
# for i in range(len(Ref_Bins_dwn)):
# if i+1 == Simulacion[j]:
# """
#             The equation of the relation follows; it needs to be changed
# """
# Pot_dwn = 0.063*(Ref_Bins_dwn[i])-11.295
# Pot_abv = 0.063*(Ref_Bins_abv[i])-11.295
# print(i+1)
# if Pot_dwn <0:
# Pot_dwn = 0
# else:
# pass
# df_temp = df[(df[field] >= Pot_dwn) & (df[field] <= Pot_abv)]
# Histo_temp, Bins_temp = np.histogram(df_temp[field][np.isfinite(df_temp[field])] , bins=Bins_pot, density=True)
# Histos.append(Histo_temp)
# else:
# pass
# Histos = np.array(Histos).T
# return Histos
Histo_JJA_Morning = FDP_Simulations(Simu_E1_JJA_Morning, df, 'strength', 2, 0, 0)
Histo_JJA_Noon = FDP_Simulations(Simu_E1_JJA_Noon, df, 'strength', 2, 1, 0)
Histo_JJA_Tarde = FDP_Simulations(Simu_E1_JJA_Tarde, df, 'strength', 2, 2, 0)
Histo_DEF_Morning = FDP_Simulations(Simu_E1_DEF_Morning, df, 'strength', 0, 0, 0)
Histo_DEF_Noon = FDP_Simulations(Simu_E1_DEF_Noon, df, 'strength', 0, 1, 0)
Histo_DEF_Tarde = FDP_Simulations(Simu_E1_DEF_Tarde, df, 'strength', 0, 2, 0)
Histo_SON_Morning = FDP_Simulations(Simu_E1_SON_Morning, df, 'strength', 3, 0, 0)
Histo_SON_Noon = FDP_Simulations(Simu_E1_SON_Noon, df, 'strength', 3, 1, 0)
Histo_SON_Tarde = FDP_Simulations(Simu_E1_SON_Tarde, df, 'strength', 3, 2, 0)
Histo_MAM_Morning = FDP_Simulations(Simu_E1_MAM_Morning, df, 'strength', 1, 0, 0)
Histo_MAM_Noon = FDP_Simulations(Simu_E1_MAM_Noon, df, 'strength', 1, 1, 0)
Histo_MAM_Tarde = FDP_Simulations(Simu_E1_MAM_Tarde, df, 'strength', 1, 2, 0)
################################################################################
## ------------------------------------PLOT------------------------------------- ##
################################################################################
class MidpointNormalize(colors.Normalize):
"""
    Normalise the colorbar so that diverging bars work their way either side from a prescribed midpoint value,
e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
cmap = matplotlib.cm.hot_r
# cmap = matplotlib.cm.Spectral_r
data = np.array([Histo_DEF_Morning, Histo_MAM_Morning, Histo_JJA_Morning, Histo_SON_Morning, Histo_DEF_Noon, Histo_MAM_Noon, Histo_JJA_Noon, Histo_SON_Noon, Histo_DEF_Tarde, Histo_MAM_Tarde, Histo_JJA_Tarde, Histo_SON_Tarde])
titles = [ u'DEF Mañana', u'MAM Mañana', u'JJA Mañana',u'SON Mañana', 'DEF Medio dia', 'MAM Medio dia', 'JJA Medio dia','SON Medio dia', 'DEF Tarde', 'MAM Tarde', 'JJA Tarde','SON Tarde']
x_arrays = [Maniana_time, Maniana_time, Maniana_time, Maniana_time, Noon_time ,Noon_time, Noon_time, Noon_time, Tarde_time, Tarde_time, Tarde_time, Tarde_time ]
plt.close('all')
fig = plt.figure(figsize=(13,10))
for i in range(0, 12):
ax = fig.add_subplot(3, 4, i+1)
# mapa = ax.imshow(data[i][::-1], interpolation = 'hamming', cmap=cmap,
mapa = ax.imshow(data[i][::-1], interpolation = None, cmap=cmap,
# clim=(data.min()), vmin=Histo_pot.min(), vmax=Histo_pot.max())
clim=(data.min()), vmin=data.min(), vmax=data.max())
ax.set_yticks(range(0,data[i].shape[0]), minor=False)
ax.set_yticklabels(Bins_pot[1:][::-1], minor=False)
ax.set_xticks(range(0,data[i].shape[1]), minor=False)
ax.set_xticklabels(np.array(x_arrays[i]), minor=False, rotation = 31)
ax.set_ylabel(u'Potencia $[W]$', fontproperties = prop_1, fontsize=12)
# ax.set_xlabel('Tiempo', fontproperties = prop_1, fontsize=12)
ax.set_title(titles[i], fontproperties = prop, fontsize =15)
cbar_ax = fig.add_axes([0.11, 0.06, 0.78, 0.008])
cbar = fig.colorbar(mapa, cax=cbar_ax, orientation='horizontal', format="%.2f")
cbar.set_label(u"Probabilidad", fontsize=13, fontproperties=prop)
plt.subplots_adjust(left=0.125, bottom=0.085, right=0.9, top=0.95, wspace=0.4, hspace=0.003)
plt.savefig('/home/nacorreasa/Escritorio/Figuras/MarkovSimulation_Radiacion.pdf', format='pdf', transparent=True)
plt.close('all')
os.system('scp /home/nacorreasa/Escritorio/Figuras/MarkovSimulation_Radiacion.pdf [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
# ANOTHER SCRIPT
#-----------------------------------------------------------------------------
# Rutas para las fuentes -----------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################################
## ----------------READING THE PANEL EXPERIMENT AND RADIATION DATA--------------------- ##
##########################################################################################
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
df_P975 = df_P975[(df_P975['NI'] > 0) & (df_P975['strength'] > 0)]
df_P350 = df_P350[(df_P350['NI'] > 0) & (df_P350['strength'] > 0)]
df_P348 = df_P348[(df_P348['NI'] > 0) & (df_P348['strength'] > 0)]
df_P975 = df_P975[df_P975['radiacion'] > 0]
df_P350 = df_P350[df_P350['radiacion'] > 0]
df_P348 = df_P348[df_P348['radiacion'] > 0]
df_P975 = df_P975[df_P975['strength'] <=80]
df_P350 = df_P350[df_P350['strength'] <=100]
df_P348 = df_P348[df_P348['strength'] <=100]
df_P975_h = df_P975.groupby(pd.Grouper(freq="H")).mean()
df_P350_h = df_P350.groupby(pd.Grouper(freq="H")).mean()
df_P348_h = df_P348.groupby(pd.Grouper(freq="H")).mean()
import numpy as np
import scipy as sp
from scipy import stats as spstats
import pandas as pd
from six.moves import range
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy.testing as npt
import nose.tools
import nose.tools as nt
from nose.tools import assert_equal, assert_almost_equal, raises
import pandas.util.testing as pdt
from .. import statistical as stat
rs = np.random.RandomState(sum(map(ord, "moss_stats")))
a_norm = rs.randn(100)
a_range = np.arange(101)
datasets = [dict(X=spstats.norm(0, 1).rvs((24, 12)),
y=spstats.bernoulli(.5).rvs(24),
runs=np.repeat([0, 1], 12)) for i in range(3)]
datasets_3d = [dict(X=spstats.norm(0, 1).rvs((4, 24, 12)),
y=spstats.bernoulli(.5).rvs(24),
runs=np.repeat([0, 1], 12)) for i in range(3)]
def test_bootstrap():
"""Test that bootstrapping gives the right answer in dumb cases."""
a_ones = np.ones(10)
n_boot = 5
out1 = stat.bootstrap(a_ones, n_boot=n_boot)
assert_array_equal(out1, np.ones(n_boot))
out2 = stat.bootstrap(a_ones, n_boot=n_boot, func=np.median)
assert_array_equal(out2, np.ones(n_boot))
def test_bootstrap_length():
"""Test that we get a bootstrap array of the right shape."""
out = stat.bootstrap(a_norm)
assert_equal(len(out), 10000)
n_boot = 100
out = stat.bootstrap(a_norm, n_boot=n_boot)
assert_equal(len(out), n_boot)
def test_bootstrap_range():
"""Test that boostrapping a random array stays within the right range."""
min, max = a_norm.min(), a_norm.max()
out = stat.bootstrap(a_norm)
nose.tools.assert_less(min, out.min())
nose.tools.assert_greater_equal(max, out.max())
def test_bootstrap_multiarg():
"""Test that bootstrap works with multiple input arrays."""
x = np.vstack([[1, 10] for i in range(10)])
y = np.vstack([[5, 5] for i in range(10)])
def test_func(x, y):
return np.vstack((x, y)).max(axis=0)
out_actual = stat.bootstrap(x, y, n_boot=2, func=test_func)
out_wanted = np.array([[5, 10], [5, 10]])
assert_array_equal(out_actual, out_wanted)
def test_bootstrap_axis():
"""Test axis kwarg to bootstrap function."""
x = rs.randn(10, 20)
n_boot = 100
out_default = stat.bootstrap(x, n_boot=n_boot)
assert_equal(out_default.shape, (n_boot,))
out_axis = stat.bootstrap(x, n_boot=n_boot, axis=0)
assert_equal(out_axis.shape, (n_boot, 20))
def test_bootstrap_random_seed():
"""Test that we can get reproducible resamples by seeding the RNG."""
data = rs.randn(50)
seed = 42
boots1 = stat.bootstrap(data, random_seed=seed)
boots2 = stat.bootstrap(data, random_seed=seed)
assert_array_equal(boots1, boots2)
def test_smooth_bootstrap():
"""Test smooth bootstrap."""
x = rs.randn(15)
n_boot = 100
out_normal = stat.bootstrap(x, n_boot=n_boot, func=np.median)
out_smooth = stat.bootstrap(x, n_boot=n_boot,
smooth=True, func=np.median)
assert(np.median(out_normal) in x)
assert(not np.median(out_smooth) in x)
def test_bootstrap_ols():
"""Test bootstrap of OLS model fit."""
def ols_fit(X, y):
return np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
X = np.column_stack((rs.randn(50, 4), np.ones(50)))
w = [2, 4, 0, 3, 5]
y_noisy = np.dot(X, w) + rs.randn(50) * 20
y_lownoise = np.dot(X, w) + rs.randn(50)
n_boot = 500
w_boot_noisy = stat.bootstrap(X, y_noisy,
n_boot=n_boot,
func=ols_fit)
w_boot_lownoise = stat.bootstrap(X, y_lownoise,
n_boot=n_boot,
func=ols_fit)
assert_equal(w_boot_noisy.shape, (n_boot, 5))
assert_equal(w_boot_lownoise.shape, (n_boot, 5))
nose.tools.assert_greater(w_boot_noisy.std(),
w_boot_lownoise.std())
def test_bootstrap_units():
"""Test that results make sense when passing unit IDs to bootstrap."""
data = rs.randn(50)
ids = np.repeat(range(10), 5)
bwerr = rs.normal(0, 2, 10)
bwerr = bwerr[ids]
data_rm = data + bwerr
seed = 77
boots_orig = stat.bootstrap(data_rm, random_seed=seed)
boots_rm = stat.bootstrap(data_rm, units=ids, random_seed=seed)
nose.tools.assert_greater(boots_rm.std(), boots_orig.std())
@raises(ValueError)
def test_bootstrap_arglength():
"""Test that different length args raise ValueError."""
stat.bootstrap(np.arange(5), np.arange(10))
@raises(TypeError)
def test_bootstrap_noncallable():
"""Test that we get a TypeError with noncallable statfunc."""
non_func = "mean"
stat.bootstrap(a_norm, 100, non_func)
def test_percentiles():
"""Test function to return sequence of percentiles."""
single_val = 5
single = stat.percentiles(a_range, single_val)
assert_equal(single, single_val)
multi_val = [10, 20]
multi = stat.percentiles(a_range, multi_val)
assert_array_equal(multi, multi_val)
array_val = rs.randint(0, 101, 5).astype(float)
array = stat.percentiles(a_range, array_val)
assert_array_almost_equal(array, array_val)
def test_percentiles_acc():
"""Test accuracy of calculation."""
# First a basic case
data = np.array([10, 20, 30])
val = 20
perc = stat.percentiles(data, 50)
assert_equal(perc, val)
# Now test against scoreatpercentile
percentiles = rs.randint(0, 101, 10)
out = stat.percentiles(a_norm, percentiles)
for score, pct in zip(out, percentiles):
assert_equal(score, sp.stats.scoreatpercentile(a_norm, pct))
def test_percentiles_axis():
"""Test use of axis argument to percentils."""
data = rs.randn(10, 10)
# Test against the median with 50th percentile
median1 = np.median(data)
out1 = stat.percentiles(data, 50)
assert_array_almost_equal(median1, out1)
for axis in range(2):
median2 = np.median(data, axis=axis)
out2 = stat.percentiles(data, 50, axis=axis)
assert_array_almost_equal(median2, out2)
median3 = np.median(data, axis=0)
out3 = stat.percentiles(data, [50, 95], axis=0)
assert_array_almost_equal(median3, out3[0])
assert_equal(2, len(out3))
def test_ci():
"""Test ci against percentiles."""
a = rs.randn(100)
p = stat.percentiles(a, [2.5, 97.5])
c = stat.ci(a, 95)
assert_array_equal(p, c)
def test_vector_reject():
"""Test vector rejection function."""
x = rs.randn(30)
y = x + rs.randn(30) / 2
x_ = stat.vector_reject(x, y)
assert_almost_equal(np.dot(x_, y), 0)
def test_add_constant():
"""Test the add_constant function."""
a = rs.randn(10, 5)
wanted = np.column_stack((a, np.ones(10)))
got = stat.add_constant(a)
assert_array_equal(wanted, got)
def test_randomize_onesample():
"""Test performance of randomize_onesample."""
a_zero = rs.normal(0, 1, 50)
t_zero, p_zero = stat.randomize_onesample(a_zero)
nose.tools.assert_greater(p_zero, 0.05)
a_five = rs.normal(5, 1, 50)
t_five, p_five = stat.randomize_onesample(a_five)
nose.tools.assert_greater(0.05, p_five)
t_scipy, p_scipy = sp.stats.ttest_1samp(a_five, 0)
nose.tools.assert_almost_equal(t_scipy, t_five)
def test_randomize_onesample_range():
"""Make sure that output is bounded between 0 and 1."""
for i in range(100):
a = rs.normal(rs.randint(-10, 10),
rs.uniform(.5, 3), 100)
t, p = stat.randomize_onesample(a, 100)
nose.tools.assert_greater_equal(1, p)
nose.tools.assert_greater_equal(p, 0)
def test_randomize_onesample_getdist():
"""Test that we can get the null distribution if we ask for it."""
a = rs.normal(0, 1, 20)
out = stat.randomize_onesample(a, return_dist=True)
assert_equal(len(out), 3)
def test_randomize_onesample_iters():
"""Make sure we get the right number of samples."""
a = rs.normal(0, 1, 20)
t, p, samples = stat.randomize_onesample(a, return_dist=True)
assert_equal(len(samples), 10000)
for n in rs.randint(5, 1e4, 5):
t, p, samples = stat.randomize_onesample(a, n, return_dist=True)
assert_equal(len(samples), n)
def test_randomize_onesample_seed():
"""Test that we can seed the random state and get the same distribution."""
a = rs.normal(0, 1, 20)
seed = 42
t_a, p_a, samples_a = stat.randomize_onesample(a, 1000,
random_seed=seed,
return_dist=True)
    t_b, p_b, samples_b = stat.randomize_onesample(a, 1000,
random_seed=seed,
return_dist=True)
assert_array_equal(samples_a, samples_b)
def test_randomize_onesample_multitest():
"""Test that randomizing over multiple tests works."""
a = rs.normal(0, 1, (20, 5))
t, p = stat.randomize_onesample(a, 1000)
assert_equal(len(t), 5)
assert_equal(len(p), 5)
t, p, dist = stat.randomize_onesample(a, 1000, return_dist=True)
assert_equal(dist.shape, (5, 1000))
def test_randomize_onesample_correction():
"""Test that maximum based correction (seems to) work."""
a = rs.normal(0, 1, (100, 10))
t_un, p_un = stat.randomize_onesample(a, 1000, corrected=False)
t_corr, p_corr = stat.randomize_onesample(a, 1000, corrected=True)
assert_array_equal(t_un, t_corr)
npt.assert_array_less(p_un, p_corr)
def test_randomize_onesample_h0():
"""Test that we can supply a null hypothesis for the group mean."""
a = rs.normal(4, 1, 100)
t, p = stat.randomize_onesample(a, 1000, h_0=0)
assert p < 0.01
t, p = stat.randomize_onesample(a, 1000, h_0=4)
assert p > 0.01
def test_randomize_onesample_scalar():
"""Single values returned from randomize_onesample should be scalars."""
a = rs.randn(40)
t, p = stat.randomize_onesample(a)
assert np.isscalar(t)
assert np.isscalar(p)
a = rs.randn(40, 3)
t, p = stat.randomize_onesample(a)
assert not np.isscalar(t)
assert not np.isscalar(p)
def test_randomize_corrmat():
"""Test the correctness of the correlation matrix p values."""
a = rs.randn(30)
b = a + rs.rand(30) * 3
c = rs.randn(30)
d = [a, b, c]
p_mat, dist = stat.randomize_corrmat(d, tail="upper", corrected=False,
return_dist=True)
nose.tools.assert_greater(p_mat[2, 0], p_mat[1, 0])
corrmat = np.corrcoef(d)
pctile = 100 - spstats.percentileofscore(dist[2, 1], corrmat[2, 1])
nose.tools.assert_almost_equal(p_mat[2, 1] * 100, pctile)
d[1] = -a + rs.rand(30)
p_mat = stat.randomize_corrmat(d)
nose.tools.assert_greater(0.05, p_mat[1, 0])
def test_randomize_corrmat_dist():
"""Test that the distribution looks right."""
a = rs.randn(3, 20)
for n_i in [5, 10]:
p_mat, dist = stat.randomize_corrmat(a, n_iter=n_i, return_dist=True)
assert_equal(n_i, dist.shape[-1])
p_mat, dist = stat.randomize_corrmat(a, n_iter=10000, return_dist=True)
diag_mean = dist[0, 0].mean()
assert_equal(diag_mean, 1)
off_diag_mean = dist[0, 1].mean()
nose.tools.assert_greater(0.05, off_diag_mean)
def test_randomize_corrmat_correction():
"""Test that FWE correction works."""
a = rs.randn(3, 20)
p_mat = stat.randomize_corrmat(a, "upper", False)
p_mat_corr = stat.randomize_corrmat(a, "upper", True)
triu = np.triu_indices(3, 1)
npt.assert_array_less(p_mat[triu], p_mat_corr[triu])
def test_randomize_corrmat_tails():
"""Test that the tail argument works."""
a = rs.randn(30)
b = a + rs.rand(30) * 8
c = rs.randn(30)
d = [a, b, c]
p_mat_b = stat.randomize_corrmat(d, "both", False, random_seed=0)
p_mat_u = stat.randomize_corrmat(d, "upper", False, random_seed=0)
p_mat_l = stat.randomize_corrmat(d, "lower", False, random_seed=0)
assert_equal(p_mat_b[0, 1], p_mat_u[0, 1] * 2)
assert_equal(p_mat_l[0, 1], 1 - p_mat_u[0, 1])
def test_randomise_corrmat_seed():
"""Test that we can seed the corrmat randomization."""
a = rs.randn(3, 20)
_, dist1 = stat.randomize_corrmat(a, random_seed=0, return_dist=True)
_, dist2 = stat.randomize_corrmat(a, random_seed=0, return_dist=True)
assert_array_equal(dist1, dist2)
@raises(ValueError)
def test_randomize_corrmat_tail_error():
"""Test that we are strict about tail paramete."""
a = rs.randn(3, 30)
stat.randomize_corrmat(a, "hello")
# def test_randomize_classifier():
# """Test basic functions of randomize_classifier."""
# data = dict(X=spstats.norm(0, 1).rvs((100, 12)),
# y=spstats.bernoulli(.5).rvs(100),
# runs=np.repeat([0, 1], 50))
# model = GaussianNB()
# p_vals, perm_vals = stat.randomize_classifier(data, model,
# return_dist=True)
# p_min, p_max = p_vals.min(), p_vals.max()
# perm_mean = perm_vals.mean()
#
# # Test that the p value are well behaved
# nose.tools.assert_greater_equal(1, p_max)
# nose.tools.assert_greater_equal(p_min, 0)
#
# # Test that the mean is close to chance (this is probabilistic)
# nose.tools.assert_greater(.1, np.abs(perm_mean - 0.5))
#
# # Test that the distribution looks normal (this is probabilistic)
# val, p = spstats.normaltest(perm_vals)
# nose.tools.assert_greater(p, 0.001)
#
#
# def test_randomize_classifier_dimension():
# """Test that we can have a time dimension and it's where we expect."""
# data = datasets_3d[0]
# n_perm = 30
# model = GaussianNB()
# p_vals, perm_vals = stat.randomize_classifier(data, model, n_perm,
# return_dist=True)
# nose.tools.assert_equal(len(p_vals), len(data["X"]))
# nose.tools.assert_equal(perm_vals.shape, (n_perm, len(data["X"])))
#
#
# def test_randomize_classifier_seed():
# """Test that we can give a particular random seed to the permuter."""
# data = datasets[0]
# model = GaussianNB()
# seed = 1
# out_a = stat.randomize_classifier(data, model, random_seed=seed)
# out_b = stat.randomize_classifier(data, model, random_seed=seed)
# assert_array_equal(out_a, out_b)
#
#
# def test_randomize_classifier_number():
# """Test size of randomize_classifier vectors."""
# data = datasets[0]
# model = GaussianNB()
# for n_iter in rs.randint(10, 250, 5):
# p_vals, perm_dist = stat.randomize_classifier(data, model, n_iter,
# return_dist=True)
# nose.tools.assert_equal(len(perm_dist), n_iter)
def test_transition_probabilities():
# Test basic
sched = [0, 1, 0, 1]
expected = pd.DataFrame([[0, 1], [1, 0]])
actual = stat.transition_probabilities(sched)
npt.assert_array_equal(expected, actual)
sched = [0, 0, 1, 1]
expected = pd.DataFrame([[.5, .5], [0, 1]])
actual = stat.transition_probabilities(sched)
npt.assert_array_equal(expected, actual)
a = rs.rand(100) < .5
a = np.where(a, "foo", "bar")
out = stat.transition_probabilities(a)
npt.assert_array_equal(out.columns.tolist(), ["bar", "foo"])
npt.assert_array_equal(out.columns, out.index)
def test_upsample():
y = np.cumsum(rs.randn(100))
yy1 = stat.upsample(y, 1)
assert_equal(len(yy1), 100)
npt.assert_array_almost_equal(y, yy1)
yy2 = stat.upsample(y, 2)
assert_equal(len(yy2), 199)
npt.assert_array_almost_equal(y, yy2[::2])
class TestRemoveUnitVariance(object):
rs = np.random.RandomState(93)
df = pd.DataFrame(dict(value=rs.rand(8),
group=np.repeat(np.tile(["m", "n"], 2), 2),
cond=np.tile(["x", "y"], 4),
unit=np.repeat(["a", "b"], 4)))
def test_remove_all(self):
df = stat.remove_unit_variance(self.df, "value", "unit")
nt.assert_in("value_within", df)
nt.assert_equal(self.df.value.mean(), self.df.value_within.mean())
nt.assert_equal(self.df.groupby("unit").value_within.mean().var(), 0)
def test_remove_by_group(self):
df = stat.remove_unit_variance(self.df, "value", "unit", "group")
grp = df.groupby("group")
pdt.assert_series_equal(grp.value.mean(), grp.value_within.mean(),
check_names=False)
for _, g in grp:
nt.assert_equal(g.groupby("unit").value_within.mean().var(), 0)
def test_suffix(self):
df = stat.remove_unit_variance(self.df, "value", "unit", suffix="_foo")
nt.assert_in("value_foo", df)
class TestVectorizedCorrelation(object):
rs = np.random.RandomState()
a = rs.randn(50)
b = rs.randn(50)
c = rs.randn(5, 50)
d = rs.randn(5, 50)
def test_vector_to_vector(self):
r_got = stat.vectorized_correlation(self.a, self.b)
r_want, _ = spstats.pearsonr(self.a, self.b)
npt.assert_almost_equal(r_got, r_want)
def test_vector_to_matrix(self):
r_got = stat.vectorized_correlation(self.a, self.c)
nt.assert_equal(r_got.shape, (self.c.shape[0],))
for i, r_got_i in enumerate(r_got):
r_want_i, _ = spstats.pearsonr(self.a, self.c[i])
npt.assert_almost_equal(r_got_i, r_want_i)
def test_matrix_to_matrix(self):
r_got = stat.vectorized_correlation(self.c, self.d)
nt.assert_equal(r_got.shape, (self.c.shape[0],))
for i, r_got_i in enumerate(r_got):
r_want_i, _ = spstats.pearsonr(self.c[i], self.d[i])
npt.assert_almost_equal(r_got_i, r_want_i)
class TestPercentChange(object):
ts_array = np.arange(6).reshape(1, 6)
ts = pd.DataFrame(ts_array)
def test_df(self):
out = stat.percent_change(self.ts)
want = pd.DataFrame([[-100, -60, -20, 20, 60, 100]], dtype=np.float)
pdt.assert_frame_equal(out, want)
def test_df_multirun(self):
out = stat.percent_change(self.ts, 2)
want = pd.DataFrame([[-100, 0, 100, -25, 0, 25]], dtype=np.float)
        pdt.assert_frame_equal(out, want)
import numpy as np
import pandas as pd
import rasterio
import statsmodels.formula.api as smf
from scipy.sparse import coo_matrix
import scipy.spatial
import patsy
from statsmodels.api import add_constant, OLS
from .utils import transform_coord
def test_linearity(x, y, n_knots=5, verbose=True):
"""Test linearity between two variables.
Run a linear regression of y on x, and take the residuals.
Fit the residuals with a natural spline with `n_knots` knots.
Conduct a joint F-test for all columns in the natural spline basis matrix.
Example:
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> x = np.linspace(0., 1., 101)
>>> y = 5 * x + 3 + rng.random(size=101) / 5
>>> test_linearity(x, y, n_knots=5, verbose=False)
0.194032
"""
residuals = OLS(y, add_constant(x)).fit().resid
basis_matrix = patsy.dmatrix(
f"cr(x, df={n_knots - 1}, constraints='center') - 1", {'x': x},
return_type='dataframe')
results = OLS(residuals, basis_matrix).fit()
results.summary()
nobs = results.nobs
f_value = results.fvalue
p_value = np.round(results.f_pvalue, 6)
print('Test for Linearity: '
f'N = {nobs:.0f}; df={nobs - n_knots - 1:.0f}; '
f'F = {f_value:.3f}; p = {p_value:.6f}.')
return p_value
def winsorize(s, lower, upper, verbose=False):
"""Winsorizes a pandas series.
Args:
s (pandas.Series): the series to be winsorized
        lower, upper (int): percentile cutoffs, numbers between 0 and 100
"""
lower_value = np.nanpercentile(s.values, lower)
upper_value = np.nanpercentile(s.values, upper)
if verbose:
print(f'Winsorizing to {lower_value} - {upper_value}')
return s.clip(lower_value, upper_value)
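# Toy illustration of winsorize (synthetic series, not panel measurements): the single
# extreme value gets clipped to the 99th percentile of the data.
def _demo_winsorize():
    s = pd.Series(np.r_[np.arange(100, dtype=float), 1e6])
    return winsorize(s, 1, 99)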
def demean(df, column, by):
"""Demean a column in a pandas DataFrame.
Args:
df (pandas.DataFrame): data
column (str): the column to be demeaned
by (list of str): the column names
"""
return (
df[column].values -
(df.loc[:, by + [column]]
.groupby(by).transform(np.nanmean).values.squeeze()))
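# Toy illustration of demean (synthetic frame, not project data): the per-group mean of y
# is removed within each group g.
def _demo_demean():
    toy = pd.DataFrame({'g': ['a', 'a', 'b', 'b'], 'y': [1.0, 3.0, 10.0, 14.0]})
    return demean(toy, column='y', by=['g'])  # -> array([-1., 1., -2., 2.])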
def load_gd_census(GPS_FILE, MASTER_FILE):
# read GPS coords + treatment status
df = pd.read_csv(
GPS_FILE,
usecols=['village_code', 'ge', 'hi_sat', 'treat',
'latitude', 'longitude', 'elevation', 'accuracy', 'eligible',
'GPS_imputed'],
dtype={
'village_code': 'Int64',
'ge': 'Int32',
'hi_sat': 'Int32',
'treat': 'Int32',
'eligible': 'Int32',
'GPS_imputed': 'Int32'})
# drop non GE households
df = df.loc[df['ge'] == 1, :].copy()
# treat x eligible = cash inflow
df.loc[:, 'treat_eligible'] = (
df.loc[:, 'treat'].values * df.loc[:, 'eligible'].values)
# read sat level identifiers
df_master = pd.read_stata(
MASTER_FILE,
columns=['village_code', 'satlevel_name']
).astype({'village_code': 'Int64'})
df_master = df_master.drop_duplicates()
# merge treatment
df = pd.merge(
df, df_master,
on='village_code', how='left')
assert df['satlevel_name'].notna().all(), (
'Missing saturation level identifier')
return df.drop(columns=['ge'])
def snap_to_grid(df, lon_col, lat_col,
min_lon, max_lon, min_lat, max_lat, step,
**kwargs):
"""Collapses variables in a data frame onto a grid.
Args:
df (pandas.DataFrame)
lon_col, lat_col (str): name of lon, lat columns
min_lon, max_lon, min_lat, max_lat, step (float)
**kwargs: passed to pandas agg() function after grouping by lat, lon
Returns:
(numpy.ndarray, numpy.ndarray): lon and lat grids
pandas.DataFrame: output data frame
"""
df_copy = df.copy()
# snap to grid
df_copy.loc[:, 'grid_lon'] = np.round(
(df[lon_col].values - min_lon - step / 2) / step
).astype(np.int32)
df_copy.loc[:, 'grid_lat'] = np.round(
(df[lat_col].values - min_lat - step / 2) / step
).astype(np.int32)
# construct the grid
grid_lon, grid_lat = np.meshgrid(
np.arange(0, np.round((max_lon - min_lon) / step).astype(np.int32)),
np.arange(0, np.round((max_lat - min_lat) / step).astype(np.int32)))
df_grid = pd.DataFrame({'grid_lon': grid_lon.flatten(),
'grid_lat': grid_lat.flatten()})
# collapse
df_output = pd.merge(
df_grid.assign(is_in_grid=True),
df_copy.groupby(['grid_lon', 'grid_lat']).agg(**kwargs),
how='outer', on=['grid_lon', 'grid_lat'])
print(f"Dropping {df_output['is_in_grid'].isna().sum()} observations;\n"
f"Keeping {df_output['is_in_grid'].notna().sum()} observations")
df_output = df_output.loc[df_output['is_in_grid'].notna(), :].copy()
return (grid_lon, grid_lat), df_output.drop(columns=['is_in_grid'])
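# Hedged usage sketch for snap_to_grid: `df_points` is assumed to carry 'longitude',
# 'latitude' and 'area' columns; the bounds and step below are illustrative values,
# not the actual study-area grid.
def _demo_snap_to_grid(df_points):
    return snap_to_grid(
        df_points, lon_col='longitude', lat_col='latitude',
        min_lon=34.0, max_lon=35.0, min_lat=-1.0, max_lat=0.0, step=0.01,
        area_sum=pd.NamedAgg(column='area', aggfunc='sum'))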
def control_for_spline(x, y, z, cr_df=3):
# handle nan's
is_na = np.any((np.isnan(x), np.isnan(y), np.isnan(z)), axis=0)
df = pd.DataFrame({'x': x[~is_na], 'y': y[~is_na], 'z': z[~is_na]})
mod = smf.ols(formula=f"z ~ 1 + cr(x, df={cr_df}) + cr(y, df={cr_df})",
data=df)
res = mod.fit()
# return nan's for cases where any one of x, y, z is nan
z_out = np.full_like(z, np.nan)
z_out[~is_na] = z[~is_na] - res.fittedvalues
return z_out
def load_nightlight_from_point(df, NL_IN_DIR, lon_col='lon', lat_col='lat'):
# extract nightlight values
ds = rasterio.open(NL_IN_DIR)
band = ds.read().squeeze(0)
idx = np.round(transform_coord(
transform=ds.transform,
to='colrow',
xy=df.loc[:, [lon_col, lat_col]].values)).astype(np.int)
df.loc[:, 'nightlight'] = [band[i[1], i[0]] for i in idx]
# winsorize + normalize
# df.loc[:, 'nightlight_winsnorm'] = winsorize(
# df['nightlight'], 0, 99)
# df.loc[:, 'nightlight_winsnorm'] = (
# (df['nightlight_winsnorm'].values -
# np.nanmean(df['nightlight_winsnorm'].values)) /
# np.nanstd(df['nightlight_winsnorm'].values))
return df
# def load_nightlight_asis(input_dir):
# """Loads nightlight data, keeping its raster grid as is.
# Args:
# input_dir (str)
# Returns:
# dict {str: float}: with the following keys
# min_lon, max_lon, min_lat, max_lat, step
# pandas.DataFrame
# """
# # load satellite data
# print('Loading nightlight data')
# ds = rasterio.open(input_dir)
# band = ds.read().squeeze(0)
# # define the grid
# grid = {
# 'min_lon': ds.bounds[0],
# 'min_lat': ds.bounds[1],
# 'max_lon': ds.bounds[2],
# 'max_lat': ds.bounds[3],
# 'step': ds.transform[0],
# }
# # construct the grid
# grid_lon, grid_lat = np.meshgrid(
# np.arange(0, ds.width),
# np.arange(0, ds.height))
# # convert to data frame
# df = pd.DataFrame({
# 'grid_lon': grid_lon.flatten(),
# 'grid_lat': grid_lat[::-1].flatten(),
# 'nightlight': band.flatten(),
# })
# # recover lon, lat
# df.loc[:, 'lon'] = (
# df['grid_lon'] * grid['step'] + grid['min_lon'] + grid['step'] / 2)
# df.loc[:, 'lat'] = (
# df['grid_lat'] * grid['step'] + grid['min_lat'] + grid['step'] / 2)
# # winsorize + normalize
# df.loc[:, 'nightlight'] = winsorize(
# df['nightlight'], 0, 99)
# df.loc[:, 'nightlight'] = (
# (df['nightlight'].values -
# np.nanmean(df['nightlight'].values)) /
# np.nanstd(df['nightlight'].values))
# return grid, df
def load_building(input_dir, grid, agg=True):
"""Loads building polygons.
Args:
input_dir (str): file to load
grid (dict {str: float}): dict with the following keys:
min_lon, max_lon, min_lat, max_lat, step
agg (bool): whether to perform aggregation
Returns:
tuple of numpy.ndarray: (grid_lon, grid_lat)
pandas.DataFrame: gridded dataframe
"""
tin_roofs = [0, 1, 5]
thatched_roofs = [2, 3, 6]
# load satellite predictions
# print('Loading building polygon data')
df = pd.read_csv(input_dir)
n_clusters = df['color_group'].max() + 1
for i in range(n_clusters):
df.loc[:, f'color_group_{i}'] = (df['color_group'].values == i)
df.loc[:, 'color_tin'] = df['color_group'].isin(tin_roofs)
df.loc[:, 'color_thatched'] = df['color_group'].isin(thatched_roofs)
# tin roof area
df.loc[:, 'color_tin_area'] = (
df['color_tin'].values * df['area'].values)
# thatched roof area
df.loc[:, 'color_thatched_area'] = (
df['color_thatched'].values * df['area'].values)
# create new var: luminosity
# df.loc[:, 'RGB_mean'] = (
# df.loc[:, ['R_mean', 'G_mean', 'B_mean']].mean(axis=1))
# control for lat lon cubic spline
# df.loc[:, 'RGB_mean_spline'] = control_for_spline(
# x=df['centroid_lon'].values,
# y=df['centroid_lat'].values,
# z=df['RGB_mean'].values,
# )
# normalize
# df.loc[:, 'RGB_mean_spline'] = (
# (df['RGB_mean_spline'].values -
# np.nanmean(df['RGB_mean_spline'].values)) /
# np.nanstd(df['RGB_mean_spline'].values))
if not agg:
return df
# snap to grid
# color_group_agg = {
# f'color_group_{i}': pd.NamedAgg(
# column=f'color_group_{i}', aggfunc='mean')
# for i in range(n_clusters)}
(grid_lon, grid_lat), df = snap_to_grid(
df, lon_col='centroid_lon', lat_col='centroid_lat', **grid,
# house_count=pd.NamedAgg(column='area', aggfunc='count'),
area_sum=pd.NamedAgg(column='area', aggfunc='sum'),
# RGB_mean=pd.NamedAgg(column='RGB_mean', aggfunc='mean'),
# RGB_mean_spline=pd.NamedAgg(column='RGB_mean_spline',
# aggfunc='mean'),
tin_area_sum=pd.NamedAgg(column='color_tin_area', aggfunc='sum'),
# thatched_area_sum=pd.NamedAgg(column='color_thatched_area',
# aggfunc='sum'),
# tin_count=pd.NamedAgg(column='color_tin', aggfunc='sum'),
# thatched_count=pd.NamedAgg(column='color_thatched', aggfunc='sum'),
# **color_group_agg,
)
df.fillna(0, inplace=True)
# df.loc[:, 'house_count_0'] = (
# df['house_count'] == 0).values.astype(np.float)
# df.loc[:, 'area_sum_pct'] = (
# df['area_sum'].values / ((grid['step'] * 111000) ** 2))
# df.loc[:, 'tin_count_pct'] = (
# df['tin_count'].values / df['house_count'].values)
# df.loc[:, 'tin_area_pct'] = (
# df['tin_area_sum'].values / df['area_sum'].values)
# df.loc[:, 'tin_area_sum_pct'] = (
# df['tin_area_sum'].values / ((grid['step'] * 111000) ** 2))
# recover lon, lat
df.loc[:, 'lon'] = (
df['grid_lon'] * grid['step'] + grid['min_lon'] + grid['step'] / 2)
df.loc[:, 'lat'] = (
df['grid_lat'] * grid['step'] + grid['min_lat'] + grid['step'] / 2)
return (grid_lon, grid_lat), df
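# Illustrative usage sketch (not part of the original script): `grid` is a dict of
# bounds plus a step size in degrees, and the CSV path is hypothetical (the file is
# expected to hold building polygons with centroid_lon/centroid_lat, area and
# color_group columns).
def _example_load_building():
    grid = {
        'min_lon': 34.0, 'max_lon': 35.0,
        'min_lat': -1.0, 'max_lat': 0.0,
        'step': 0.0005,
    }
    (grid_lon, grid_lat), df_grid = load_building(
        'building_polygons_example.csv', grid, agg=True)
    return df_grid[['lon', 'lat', 'area_sum', 'tin_area_sum']]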
def load_survey(SVY_IN_DIR):
# load survey data
df_svy = | pd.read_stata(SVY_IN_DIR) | pandas.read_stata |
import pandas as pd
from pandas.io.json import json_normalize
def venues_explore(client,lat,lng, limit=100, verbose=0, sort='popular', radius=2000, offset=1, day='any',query=''):
    '''Function to get n places using explore in Foursquare, where n is the limit when calling the function.
    This returns a pandas dataframe with name, city, country, lat, long, address and main category as columns
    Arguments: *client, *lat, *lng, limit (defaults to 100), radius (defaults to 2000), verbose (defaults to 0), offset (defaults to 1), day (defaults to any)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
ll=lat+','+lng
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'ll':ll,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value per Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
def venues_explore_near(client,near, limit=100, verbose=0, sort='popular', radius=100000, offset=1, day='any',query=''):
    '''Function to get n places using explore in Foursquare, where n is the limit when calling the function.
    This returns a pandas dataframe with name, city, country, near, address and main category as columns.
    The "near" argument searches within the bounds of the geocode for a string naming a place in the world.
    Arguments: *client, *near, limit (defaults to 100), radius (defaults to 100000, max according to api docs), verbose (defaults to 0), offset (defaults to 1), day (defaults to any)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'near':near,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value according to Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
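# Illustrative usage sketch (not part of the original module): both helpers expect
# an authenticated client from the `foursquare` package referenced above; the
# credentials and search terms are placeholders.
def _example_venues_explore():
    import foursquare
    client = foursquare.Foursquare(client_id='YOUR_CLIENT_ID',
                                   client_secret='YOUR_CLIENT_SECRET')
    df_nearby = venues_explore(client, lat='40.7243', lng='-74.0018',
                               limit=50, radius=1000, query='coffee')
    df_city = venues_explore_near(client, near='Toronto', limit=50, query='museum')
    return df_nearby, df_city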
def get_categories():
    '''Function to get a Pandas DataFrame of all categories in Foursquare as listed in https://developer.foursquare.com/docs/resources/categories
    It uses json_normalize to flatten the nested information and returns a DataFrame with main, sub and sub-sub category names and IDs'''
df1 = pd.read_json('https://api.foursquare.com/v2/venues/categories?v=20170211&oauth_token=QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP&includeSupportedCC=true')
df1=df1.iloc[0,1]
df1 = | json_normalize(df1) | pandas.io.json.json_normalize |
import datetime as dt
import unittest
from unittest.mock import patch
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal, assert_index_equal
import seaice.timeseries.warp as warp
from seaice.timeseries.common import SeaIceTimeseriesInvalidArgument
class Test_filter_failed_qa(unittest.TestCase):
def test_failed_qa_set_to_na(self):
columns = ['Foo', 'Bar', 'failed_qa', 'filename']
actual = pd.DataFrame([[1, 2, True, '/foo'], [1, 2, False, '/foo'], [1, 2, True, '/foo']],
columns=columns)
expected = pd.DataFrame([[np.nan, np.nan, True, ''],
[1, 2, False, '/foo'],
[np.nan, np.nan, True, '']], columns=columns)
actual = warp.filter_failed_qa(actual)
assert_frame_equal(expected, actual)
class Test_climatologyMeans(unittest.TestCase):
def test_means(self):
index = pd.period_range(start='2000-05', end='2016-05', freq='12M')
values = np.array([10, 20, 30, 40, 50, 50, 50, 50, 90, 99,
100, 100, 100, 100, 100, 100, 10])
climatology_years = (2010, 2015)
series = pd.Series(values, index=index)
expected = pd.Series(100, index=[5])
actual = warp.climatology_means(series, climatology_years)
assert_series_equal(expected, actual)
def test_multiple_months_in_series(self):
anything = 3.14159
index = pd.PeriodIndex(['2000-05', '2000-11', '2001-05', '2001-11', '2002-05', '2002-11',
'2003-05', '2003-11', '2004-05', '2004-11', '2005-05'],
freq='6M')
climatology_years = (2000, 2001)
values = [15., 99., 15., 99., anything, anything,
anything, anything, anything, anything, anything]
series = pd.Series(values, index=index)
actual = warp.climatology_means(series, climatology_years)
expected = pd.Series([15., 99], index=[5, 11])
assert_series_equal(expected, actual)
class TestFilterHemisphere(unittest.TestCase):
def setUp(self):
datetimes = pd.to_datetime(['1990-01-01', '1995-01-01', '2000-01-01', '2010-01-01'])
daily_period_index = datetimes.to_period(freq='D')
monthly_period_index = datetimes.to_period(freq='M')
self.daily_df = pd.DataFrame({
'hemisphere': ['S', 'N', 'S', 'N'],
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=daily_period_index)
self.monthly_df = pd.DataFrame({
'hemisphere': ['S', 'N', 'S', 'N'],
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index)
def test_daily_works_with_hemisphere(self):
expected = self.daily_df.copy().ix[[0, 2]]
actual = warp.filter_hemisphere(self.daily_df, 'S')
assert_frame_equal(expected, actual)
def test_daily_raises_error_with_none(self):
with self.assertRaises(SeaIceTimeseriesInvalidArgument):
warp.filter_hemisphere(self.daily_df, None)
def test_monthly_works_with_hemisphere(self):
expected = self.monthly_df.copy().ix[[1, 3]]
actual = warp.filter_hemisphere(self.monthly_df, 'N')
assert_frame_equal(expected, actual)
def test_monthly_works_with_none(self):
with self.assertRaises(SeaIceTimeseriesInvalidArgument):
warp.filter_hemisphere(self.monthly_df, None)
class TestCollapseHemisphereFilter(unittest.TestCase):
def test_frame_collapses(self):
frame_length = 10
index = pd.MultiIndex.from_tuples([('foo', 'N')]*frame_length, names=('date', 'hemisphere'))
df = pd.DataFrame({'data': [5]*frame_length}, index=index)
expected = df.reset_index(level='hemisphere', drop=False)
actual = warp.collapse_hemisphere_index(df)
assert_frame_equal(expected, actual)
class TestFilterBeforeAndFilterAfter(unittest.TestCase):
def setUp(self):
datetimes = pd.to_datetime(['1990-01-01', '1995-01-01', '2000-01-01', '2010-01-01'])
daily_period_index = datetimes.to_period(freq='D')
monthly_period_index = datetimes.to_period(freq='M')
self.daily_df = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=daily_period_index)
self.daily_df_with_datetimeindex = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=datetimes)
self.monthly_df = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index)
self.monthly_df_with_datetimeindex = pd.DataFrame({
'total_extent_km2': [19900000.0, 1995000.0, 2000000.0, 2010000.0],
'total_area_km2': [1990.0, 1995.0, 2000.0, 2010.0]
}, index=monthly_period_index.to_timestamp())
def test_filter_before_works_with_daily_df_and_none(self):
expected = self.daily_df.copy()
actual = warp.filter_before(self.daily_df, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_daily_df_and_none_and_DateTimeIndex(self):
expected = self.daily_df_with_datetimeindex.copy()
actual = warp.filter_before(self.daily_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_daily_df(self):
expected = self.daily_df.copy().ix[1:]
actual = warp.filter_before(self.daily_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df_and_none(self):
expected = self.monthly_df.copy()
actual = warp.filter_before(self.monthly_df, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df_and_none_and_DateTimeIndex(self):
expected = self.monthly_df_with_datetimeindex.copy()
actual = warp.filter_before(self.monthly_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_before_works_with_monthly_df(self):
expected = self.monthly_df.copy().ix[1:]
actual = warp.filter_before(self.monthly_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df_and_none(self):
expected = self.daily_df.copy()
actual = warp.filter_after(self.daily_df, None)
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df_and_none_and_DateTimeIndex(self):
expected = self.daily_df_with_datetimeindex.copy()
actual = warp.filter_after(self.daily_df_with_datetimeindex, None)
assert_frame_equal(expected, actual)
def test_filter_after_works_with_daily_df(self):
expected = self.daily_df.copy().ix[0:1]
actual = warp.filter_after(self.daily_df, dt.datetime(1990, 5, 21))
assert_frame_equal(expected, actual)
def test_filter_after_works_with_monthly_df_and_none(self):
expected = self.monthly_df.copy()
actual = warp.filter_after(self.monthly_df, None)
| assert_frame_equal(expected, actual) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Investing.com API - Market and historical data downloader
# https://github.com/crapher/pyinvesting.git
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import __user_agent__
from pyquery import PyQuery as pq
import re
import requests as rq
import pandas as pd
import numpy as np
import threading
import time
class OnlineScrapping:
def __init__(self, proxy_url=None):
"""
Class constructor
Parameters
----------
proxy_url : str, optional
The proxy URL with one of the following formats:
- scheme://user:pass@hostname:port
- scheme://user:pass@ip:port
- scheme://hostname:port
- scheme://ip:port
Ex. https://john:[email protected]:3128
"""
if proxy_url:
self._proxies = {'http': proxy_url, 'https': proxy_url}
else:
self._proxies = None
self._stream_server = None
self._stream_server_lock = threading.Lock()
########################
#### PUBLIC METHODS ####
########################
def get_stream_server(self):
"""
        Returns a stream server to be used for the websocket to retrieve quotes.
"""
with self._stream_server_lock:
if not self._stream_server:
content = self._get_page_content('https://www.investing.com/indices/us-30')
self._stream_server = self._get_stream_server_from_page(content)
return self._stream_server
def get_quotes_from_link(self, pair_id, link):
"""
Parses and returns a dataframe with the quotes for the specified pair_id.
Parameters
----------
pair_id : int
The pair_id value received in the search ticker query.
link : str
The link value received in the search ticker query.
"""
content = self._get_page_content(link)
with self._stream_server_lock:
if not self._stream_server:
self._stream_server = self._get_stream_server_from_page(content)
return self._get_quotes_from_page(content, pair_id)
#########################
#### PRIVATE METHODS ####
#########################
def _get_page_content(self, url):
if url.startswith('/'): # relative URL
url = 'https://www.investing.com{}'.format(url)
headers = {
'User-Agent': __user_agent__,
'Accept-Encoding': 'gzip, deflate'
}
response = rq.get(url = url, headers = headers, proxies = self._proxies)
response.raise_for_status()
return response.text
def _get_stream_server_from_page(self, text):
result = re.search('//stream(\d+).forexpros', text)
return result.group(1) if result else None
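    # Illustrative sketch (not part of the original class): the stream server id is
    # the digit group in the page's "streamNNN.forexpros" reference; the HTML
    # fragment below is made up.
    @staticmethod
    def _example_stream_server_regex():
        sample_html = '<script src="https://stream172.forexpros.com/echo/info"></script>'
        match = re.search('//stream(\d+).forexpros', sample_html)
        return match.group(1) if match else None  # -> '172'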
def _get_quotes_from_page(self, text, pair_id):
data = {'pair_id': pair_id, 'bid': np.NAN, 'ask': np.NAN, 'last': np.NAN, 'high': np.NAN, 'low': np.NAN, 'pcp': np.NAN, 'turnover': np.NAN, 'pc': np.NAN, 'timestamp': int(time.time())}
keys = list(data)
doc = pq(text)
spans = doc("span[class*='{}']".format(pair_id))
for span in spans:
classes = span.attrib['class']
for key in keys:
if '{}-{}'.format(pair_id, key) in classes:
try:
data[key] = float(span.text.replace(',','').replace('%','').strip())
keys.remove(key)
except:
pass
break
if data['last'] and data['pc']:
data['pc'] = data['last'] - data['pc']
elif data['pc']: # There is no last value
data['pc'] = np.NAN
result = pd.DataFrame(data, index=[0])
result.columns = ['pair_id', 'bid', 'ask', 'last', 'high', 'low', 'change', 'turnover', 'previous_close', 'timestamp']
result['datetime'] = | pd.to_datetime(result['timestamp'], unit='s') | pandas.to_datetime |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = | Series([True, False, True], index=index) | pandas.Series |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
from sklearn import preprocessing
warnings.filterwarnings('ignore')
### Loading raw data
raw = | pd.read_csv('household_power_consumption.csv') | pandas.read_csv |
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from typing import Union
from copy import deepcopy
from itertools import compress
import json
time_dict = {0: "Now", 7: "One Week", 14: "Two Weeks", 28: "Four Weeks", 42: "Six Weeks"}
class DELPHIDataSaver:
def __init__(
self, path_to_folder_danger_map: str,
path_to_website_predicted: str,
df_global_parameters: Union[pd.DataFrame, None],
df_global_predictions_since_today: pd.DataFrame,
df_global_predictions_since_100_cases: pd.DataFrame,
):
self.PATH_TO_FOLDER_DANGER_MAP = path_to_folder_danger_map
self.PATH_TO_WEBSITE_PREDICTED = path_to_website_predicted
self.df_global_parameters = df_global_parameters
self.df_global_predictions_since_today = df_global_predictions_since_today
self.df_global_predictions_since_100_cases = df_global_predictions_since_100_cases
def save_all_datasets(self, save_since_100_cases=False, website=False):
today_date_str = "".join(str(datetime.now().date()).split("-"))
# Save parameters
self.df_global_parameters.to_csv(
self.PATH_TO_FOLDER_DANGER_MAP + f"/predicted/Parameters_Global_{today_date_str}.csv", index=False
)
self.df_global_parameters.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Parameters_Global_{today_date_str}.csv", index=False
)
# Save predictions since today
self.df_global_predictions_since_today.to_csv(
self.PATH_TO_FOLDER_DANGER_MAP + f"/predicted/Global_{today_date_str}.csv", index=False
)
self.df_global_predictions_since_today.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_{today_date_str}.csv", index=False
)
if website:
self.df_global_parameters.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Parameters_Global_Python_{today_date_str}.csv",
index=False
)
self.df_global_predictions_since_today.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_Python_{today_date_str}_Scenarios.csv",
index=False
)
self.df_global_predictions_since_today.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_Python_Scenarios.csv", index=False
)
if save_since_100_cases:
# Save predictions since 100 cases
self.df_global_predictions_since_100_cases.to_csv(
self.PATH_TO_FOLDER_DANGER_MAP + f"/predicted/Global_since100_{today_date_str}.csv", index=False
)
self.df_global_predictions_since_100_cases.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_since100_{today_date_str}.csv", index=False
)
if website:
self.df_global_predictions_since_100_cases.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_since100_{today_date_str}_Scenarios.csv",
index=False
)
self.df_global_predictions_since_100_cases.to_csv(
self.PATH_TO_WEBSITE_PREDICTED + f"/predicted/Global_since100_Scenarios.csv", index=False
)
@staticmethod
def create_nested_dict_from_final_dataframe(df_predictions: pd.DataFrame) -> dict:
dict_all_results = {}
default_policy = "Lockdown"
default_policy_enaction_time = "Now"
for province in df_predictions.Province.unique():
df_predictions_province = df_predictions[df_predictions.Province == province].reset_index(drop=True)
dict_all_results[province] = {
"Day": sorted(list(df_predictions_province.Day.unique())),
"Total Detected True": df_predictions_province[
(df_predictions_province.Policy == default_policy)
& (df_predictions_province.Time == default_policy_enaction_time)
].sort_values("Day")["Total Detected True"].tolist(),
"Total Detected Deaths True": df_predictions_province[
(df_predictions_province.Policy == default_policy)
& (df_predictions_province.Time == default_policy_enaction_time)
].sort_values("Day")["Total Detected Deaths True"].tolist(),
}
dict_all_results[province].update({
policy: {
policy_enaction_time: {
"Total Detected": df_predictions_province[
(df_predictions_province.Policy == policy)
& (df_predictions_province.Time == policy_enaction_time)
].sort_values("Day")["Total Detected"].tolist(),
"Total Detected Deaths": df_predictions_province[
(df_predictions_province.Policy == policy)
& (df_predictions_province.Time == policy_enaction_time)
].sort_values("Day")["Total Detected Deaths"].tolist(),
}
for policy_enaction_time in df_predictions_province.Time.unique()
}
for policy in df_predictions_province.Policy.unique()
})
return dict_all_results
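    # Illustrative sketch (not part of the original class): minimal input frame
    # accepted by create_nested_dict_from_final_dataframe; all figures are made up.
    @staticmethod
    def _example_nested_dict():
        df_predictions = pd.DataFrame({
            "Province": ["New York"] * 2,
            "Policy": ["Lockdown"] * 2,
            "Time": ["Now"] * 2,
            "Day": ["2020-05-01", "2020-05-02"],
            "Total Detected": [300000, 305000],
            "Total Detected Deaths": [18000, 18300],
            "Total Detected True": [299000, 304000],
            "Total Detected Deaths True": [17900, 18200],
        })
        return DELPHIDataSaver.create_nested_dict_from_final_dataframe(df_predictions)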
def save_policy_predictions_to_dict_pickle(self, website=False):
today_date_str = "".join(str(datetime.now().date()).split("-"))
dict_predictions_policies_US_since_100_cases = DELPHIDataSaver.create_nested_dict_from_final_dataframe(
self.df_global_predictions_since_100_cases
)
with open(
self.PATH_TO_FOLDER_DANGER_MAP +
f'/predicted/US_Python_{today_date_str}_Scenarios_since_100_cases.json', 'w'
) as handle:
json.dump(dict_predictions_policies_US_since_100_cases, handle)
with open(
self.PATH_TO_FOLDER_DANGER_MAP + f'/predicted/US_Python_Scenarios_since_100_cases.json', 'w'
) as handle:
json.dump(dict_predictions_policies_US_since_100_cases, handle)
if website:
with open(
self.PATH_TO_WEBSITE_PREDICTED +
f'/predicted/US_Python_{today_date_str}_Scenarios_since_100_cases.json', 'w'
) as handle:
json.dump(dict_predictions_policies_US_since_100_cases, handle)
with open(
self.PATH_TO_WEBSITE_PREDICTED + f'/predicted/US_Python_Scenarios_since_100_cases.json', 'w'
) as handle:
json.dump(dict_predictions_policies_US_since_100_cases, handle)
class DELPHIDataCreator:
def __init__(
self, x_sol_final: np.array, date_day_since100: datetime,
best_params: np.array, continent: str, country: str, province: str,
):
assert len(best_params) == 7, f"Expected 7 best parameters, got {len(best_params)}"
self.x_sol_final = x_sol_final
self.date_day_since100 = date_day_since100
self.best_params = best_params
self.continent = continent
self.country = country
self.province = province
def create_dataset_parameters(self, mape) -> pd.DataFrame:
df_parameters = pd.DataFrame({
"Continent": [self.continent], "Country": [self.country], "Province": [self.province],
"Data Start Date": [self.date_day_since100], "MAPE": [mape], "Infection Rate": [self.best_params[0]],
"Median Day of Action": [self.best_params[1]], "Rate of Action": [self.best_params[2]],
"Rate of Death": [self.best_params[3]], "Mortality Rate": [self.best_params[4]],
"Internal Parameter 1": [self.best_params[5]], "Internal Parameter 2": [self.best_params[6]],
})
return df_parameters
def create_datasets_predictions(self) -> (pd.DataFrame, pd.DataFrame):
n_days_btw_today_since_100 = (datetime.now() - self.date_day_since100).days
n_days_since_today = self.x_sol_final.shape[1] - n_days_btw_today_since_100
all_dates_since_today = [
str((datetime.now() + timedelta(days=i)).date())
for i in range(n_days_since_today)
]
# Predictions
total_detected = self.x_sol_final[15, :] # DT
total_detected = [round(x, 0) for x in total_detected]
active_cases = (
self.x_sol_final[4, :] + self.x_sol_final[5, :] + self.x_sol_final[7, :] + self.x_sol_final[8, :]
) # DHR + DQR + DHD + DQD
active_cases = [round(x, 0) for x in active_cases]
active_hospitalized = self.x_sol_final[4, :] + self.x_sol_final[7, :] # DHR + DHD
active_hospitalized = [round(x, 0) for x in active_hospitalized]
cumulative_hospitalized = self.x_sol_final[11, :] # TH
cumulative_hospitalized = [round(x, 0) for x in cumulative_hospitalized]
total_detected_deaths = self.x_sol_final[14, :] # DD
total_detected_deaths = [round(x, 0) for x in total_detected_deaths]
active_ventilated = self.x_sol_final[12, :] + self.x_sol_final[13, :] # DVR + DVD
active_ventilated = [round(x, 0) for x in active_ventilated]
# Generation of the dataframe since today
df_predictions_since_today_cont_country_prov = pd.DataFrame({
"Continent": [self.continent for _ in range(n_days_since_today)],
"Country": [self.country for _ in range(n_days_since_today)],
"Province": [self.province for _ in range(n_days_since_today)],
"Day": all_dates_since_today,
"Total Detected": total_detected[n_days_btw_today_since_100:],
"Active": active_cases[n_days_btw_today_since_100:],
"Active Hospitalized": active_hospitalized[n_days_btw_today_since_100:],
"Cumulative Hospitalized": cumulative_hospitalized[n_days_btw_today_since_100:],
"Total Detected Deaths": total_detected_deaths[n_days_btw_today_since_100:],
"Active Ventilated": active_ventilated[n_days_btw_today_since_100:],
})
# Generation of the dataframe from the day since 100th case
all_dates_since_100 = [
str((self.date_day_since100 + timedelta(days=i)).date())
for i in range(self.x_sol_final.shape[1])
]
df_predictions_since_100_cont_country_prov = pd.DataFrame({
"Continent": [self.continent for _ in range(len(all_dates_since_100))],
"Country": [self.country for _ in range(len(all_dates_since_100))],
"Province": [self.province for _ in range(len(all_dates_since_100))],
"Day": all_dates_since_100,
"Total Detected": total_detected,
"Active": active_cases,
"Active Hospitalized": active_hospitalized,
"Cumulative Hospitalized": cumulative_hospitalized,
"Total Detected Deaths": total_detected_deaths,
"Active Ventilated": active_ventilated,
})
return df_predictions_since_today_cont_country_prov, df_predictions_since_100_cont_country_prov
def create_datasets_predictions_scenario(
self, policy="Lockdown", time=0, totalcases=None,
) -> (pd.DataFrame, pd.DataFrame):
n_days_btw_today_since_100 = (datetime.now() - self.date_day_since100).days
n_days_since_today = self.x_sol_final.shape[1] - n_days_btw_today_since_100
all_dates_since_today = [
str((datetime.now() + timedelta(days=i)).date())
for i in range(n_days_since_today)
]
# Predictions
total_detected = self.x_sol_final[15, :] # DT
total_detected = [round(x, 0) for x in total_detected]
active_cases = (
self.x_sol_final[4, :] + self.x_sol_final[5, :] + self.x_sol_final[7, :] + self.x_sol_final[8, :]
) # DHR + DQR + DHD + DQD
active_cases = [round(x, 0) for x in active_cases]
active_hospitalized = self.x_sol_final[4, :] + self.x_sol_final[7, :] # DHR + DHD
active_hospitalized = [round(x, 0) for x in active_hospitalized]
cumulative_hospitalized = self.x_sol_final[11, :] # TH
cumulative_hospitalized = [round(x, 0) for x in cumulative_hospitalized]
total_detected_deaths = self.x_sol_final[14, :] # DD
total_detected_deaths = [round(x, 0) for x in total_detected_deaths]
active_ventilated = self.x_sol_final[12, :] + self.x_sol_final[13, :] # DVR + DVD
active_ventilated = [round(x, 0) for x in active_ventilated]
# Generation of the dataframe since today
df_predictions_since_today_cont_country_prov = pd.DataFrame({
"Policy": [policy for _ in range(n_days_since_today)],
"Time": [time_dict[time] for _ in range(n_days_since_today)],
"Continent": [self.continent for _ in range(n_days_since_today)],
"Country": [self.country for _ in range(n_days_since_today)],
"Province": [self.province for _ in range(n_days_since_today)],
"Day": all_dates_since_today,
"Total Detected": total_detected[n_days_btw_today_since_100:],
"Active": active_cases[n_days_btw_today_since_100:],
"Active Hospitalized": active_hospitalized[n_days_btw_today_since_100:],
"Cumulative Hospitalized": cumulative_hospitalized[n_days_btw_today_since_100:],
"Total Detected Deaths": total_detected_deaths[n_days_btw_today_since_100:],
"Active Ventilated": active_ventilated[n_days_btw_today_since_100:],
})
# Generation of the dataframe from the day since 100th case
all_dates_since_100 = [
str((self.date_day_since100 + timedelta(days=i)).date())
for i in range(self.x_sol_final.shape[1])
]
df_predictions_since_100_cont_country_prov = pd.DataFrame({
"Policy": [policy for _ in range(len(all_dates_since_100))],
"Time": [time_dict[time] for _ in range(len(all_dates_since_100))],
"Continent": [self.continent for _ in range(len(all_dates_since_100))],
"Country": [self.country for _ in range(len(all_dates_since_100))],
"Province": [self.province for _ in range(len(all_dates_since_100))],
"Day": all_dates_since_100,
"Total Detected": total_detected,
"Active": active_cases,
"Active Hospitalized": active_hospitalized,
"Cumulative Hospitalized": cumulative_hospitalized,
"Total Detected Deaths": total_detected_deaths,
"Active Ventilated": active_ventilated,
})
if totalcases is not None: # Merging the historical values to both dataframes when available
df_predictions_since_today_cont_country_prov = df_predictions_since_today_cont_country_prov.merge(
totalcases[["country", "province", "date", "case_cnt", "death_cnt"]],
left_on=["Country", "Province", "Day"],
right_on=["country", "province", "date"],
how="left",
)
df_predictions_since_today_cont_country_prov.rename(
columns={"case_cnt": "Total Detected True", "death_cnt": "Total Detected Deaths True"},
inplace=True,
)
df_predictions_since_today_cont_country_prov.drop(
["country", "province", "date"], axis=1, inplace=True
)
df_predictions_since_100_cont_country_prov = df_predictions_since_100_cont_country_prov.merge(
totalcases[["country", "province", "date", "case_cnt", "death_cnt"]],
left_on=["Country", "Province", "Day"],
right_on=["country", "province", "date"],
how="left",
)
df_predictions_since_100_cont_country_prov.rename(
columns={"case_cnt": "Total Detected True", "death_cnt": "Total Detected Deaths True"},
inplace=True,
)
df_predictions_since_100_cont_country_prov.drop(
["country", "province", "date"], axis=1, inplace=True
)
return df_predictions_since_today_cont_country_prov, df_predictions_since_100_cont_country_prov
class DELPHIAggregations:
@staticmethod
def get_aggregation_per_country(df: pd.DataFrame) -> pd.DataFrame:
df = df[df["Province"] != "None"]
df_agg_country = df.groupby(["Continent", "Country", "Day"]).sum().reset_index()
df_agg_country["Province"] = "None"
df_agg_country = df_agg_country[[
'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_country
@staticmethod
def get_aggregation_per_continent(df: pd.DataFrame) -> pd.DataFrame:
df_agg_continent = df.groupby(["Continent", "Day"]).sum().reset_index()
df_agg_continent["Country"] = "None"
df_agg_continent["Province"] = "None"
df_agg_continent = df_agg_continent[[
'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_continent
@staticmethod
def get_aggregation_world(df: pd.DataFrame) -> pd.DataFrame:
df_agg_world = df.groupby("Day").sum().reset_index()
df_agg_world["Continent"] = "None"
df_agg_world["Country"] = "None"
df_agg_world["Province"] = "None"
df_agg_world = df_agg_world[[
'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_world
@staticmethod
def append_all_aggregations(df: pd.DataFrame) -> pd.DataFrame:
df_agg_since_today_per_country = DELPHIAggregations.get_aggregation_per_country(df)
df_agg_since_today_per_continent = DELPHIAggregations.get_aggregation_per_continent(df)
df_agg_since_today_world = DELPHIAggregations.get_aggregation_world(df)
df = pd.concat([
df, df_agg_since_today_per_country,
df_agg_since_today_per_continent, df_agg_since_today_world
])
df.sort_values(["Continent", "Country", "Province", "Day"], inplace=True)
return df
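# Illustrative usage sketch (not part of the original module): append_all_aggregations
# stacks country, continent and world totals onto province-level rows; the figures
# below are made up.
def _example_append_all_aggregations():
    df_province = pd.DataFrame({
        "Continent": ["North America"] * 2,
        "Country": ["US"] * 2,
        "Province": ["New York", "New Jersey"],
        "Day": ["2020-05-01", "2020-05-01"],
        "Total Detected": [300000, 120000],
        "Active": [200000, 80000],
        "Active Hospitalized": [12000, 5000],
        "Cumulative Hospitalized": [70000, 25000],
        "Total Detected Deaths": [18000, 8000],
        "Active Ventilated": [3000, 1200],
    })
    return DELPHIAggregations.append_all_aggregations(df_province)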
class DELPHIAggregationsPolicies:
@staticmethod
def get_aggregation_per_country(df: pd.DataFrame) -> pd.DataFrame:
df = df[df["Province"] != "None"]
df_agg_country = df.groupby(["Policy", "Time", "Continent", "Country", "Day"]).sum().reset_index()
df_agg_country["Province"] = "None"
df_agg_country = df_agg_country[[
'Policy', 'Time', 'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_country
@staticmethod
def get_aggregation_per_continent(df: pd.DataFrame) -> pd.DataFrame:
df_agg_continent = df.groupby(["Policy", "Time", "Continent", "Day"]).sum().reset_index()
df_agg_continent["Country"] = "None"
df_agg_continent["Province"] = "None"
df_agg_continent = df_agg_continent[[
'Policy', 'Time', 'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_continent
@staticmethod
def get_aggregation_world(df: pd.DataFrame) -> pd.DataFrame:
df_agg_world = df.groupby(["Policy", "Time", "Day"]).sum().reset_index()
df_agg_world["Continent"] = "None"
df_agg_world["Country"] = "None"
df_agg_world["Province"] = "None"
df_agg_world = df_agg_world[[
'Policy', 'Time', 'Continent', 'Country', 'Province', 'Day', 'Total Detected', 'Active',
'Active Hospitalized', 'Cumulative Hospitalized', 'Total Detected Deaths', 'Active Ventilated'
]]
return df_agg_world
@staticmethod
def append_all_aggregations(df: pd.DataFrame) -> pd.DataFrame:
df_agg_since_today_per_country = DELPHIAggregations.get_aggregation_per_country(df)
df_agg_since_today_per_continent = DELPHIAggregations.get_aggregation_per_continent(df)
df_agg_since_today_world = DELPHIAggregations.get_aggregation_world(df)
df = pd.concat([
df, df_agg_since_today_per_country,
df_agg_since_today_per_continent, df_agg_since_today_world
])
df.sort_values(["Policy", "Time", "Continent", "Country", "Province", "Day"], inplace=True)
return df
def get_initial_conditions(params_fitted, global_params_fixed):
alpha, days, r_s, r_dth, p_dth, k1, k2 = params_fitted
N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = global_params_fixed
S_0 = (
(N - PopulationCI / p_d) -
(PopulationCI / p_d * (k1 + k2)) -
(PopulationR / p_d) -
(PopulationD / p_d)
)
E_0 = PopulationCI / p_d * k1
I_0 = PopulationCI / p_d * k2
AR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth)
DHR_0 = (PopulationCI * p_h) * (1 - p_dth)
DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth)
AD_0 = (PopulationCI / p_d - PopulationCI) * p_dth
DHD_0 = PopulationCI * p_h * p_dth
DQD_0 = PopulationCI * (1 - p_h) * p_dth
R_0 = PopulationR / p_d
D_0 = PopulationD / p_d
TH_0 = PopulationCI * p_h
DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth)
DVD_0 = (PopulationCI * p_h * p_v) * p_dth
DD_0 = PopulationD
DT_0 = PopulationI
x_0_cases = [
S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0,
R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0
]
return x_0_cases
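# Illustrative usage sketch (not part of the original module): the fitted and fixed
# parameters below are placeholders, not calibrated values.
def _example_initial_conditions():
    params_fitted = (0.5, 20.0, 5.0, 0.1, 0.02, 2.0, 0.5)  # alpha, days, r_s, r_dth, p_dth, k1, k2
    global_params_fixed = (
        19450000,  # N: total population
        5000,      # PopulationCI: currently infected (detected)
        1000,      # PopulationR: recovered (detected)
        200,       # PopulationD: deceased (detected)
        6200,      # PopulationI: cumulative detected cases
        0.2,       # p_d: detection probability
        0.15,      # p_h: hospitalization probability
        0.25,      # p_v: ventilation probability
    )
    return get_initial_conditions(params_fitted, global_params_fixed)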
def create_fitting_data_from_validcases(validcases):
validcases_nondeath = validcases["case_cnt"].tolist()
validcases_death = validcases["death_cnt"].tolist()
balance = validcases_nondeath[-1] / max(validcases_death[-1], 10) / 3
fitcasesnd = validcases_nondeath
fitcasesd = validcases_death
return balance, fitcasesnd, fitcasesd
def mape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred)[y_true > 0] / y_true[y_true > 0])) * 100
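# Illustrative sketch (not part of the original module): MAPE is averaged only over
# the entries where the true value is strictly positive.
def _example_mape():
    y_true = [100, 0, 200, 400]
    y_pred = [110, 5, 180, 400]
    return mape(y_true, y_pred)  # mean of 10%, 10%, 0% -> ~6.67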
def convert_dates_us_policies(x):
if x == "Not implemented":
return np.nan
else:
x_long = x + "20"
return pd.to_datetime(x_long, format="%d-%b-%Y")
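# Illustrative sketch (not part of the original module): the raw policy dates appear
# to carry two-digit years (e.g. "24-Mar-20"), so appending "20" yields a parseable
# four-digit year; the sample values are made up.
def _example_convert_dates():
    assert pd.isna(convert_dates_us_policies("Not implemented"))
    return convert_dates_us_policies("24-Mar-20")  # -> Timestamp('2020-03-24')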
def read_policy_data_us_only():
data_path = (
"E:/Github/DELPHI/data_sandbox"
# "/Users/hamzatazi/Desktop/MIT/999.1 Research Assistantship/" +
# "4. COVID19_Global/DELPHI/data_sandbox"
)
df = pd.read_csv(data_path + "/25042020_raw_policy_data_US_only.csv")
df.State = df.State.apply(lambda x: x[0].upper() + x[1:])
concat_data = []
for i, measure in enumerate(df.Measure.unique()):
df_temp = df[df.Measure == measure].reset_index(drop=True)
df_concat = pd.DataFrame({
f"province_{i}": df_temp.State,
f"{measure}": df_temp.Date
})
concat_data.append(df_concat)
df_format = pd.concat(concat_data, axis=1)
df_format.drop(
[f"province_{i}" for i in range(1, len(df.Measure.unique()))],
axis=1, inplace=True
)
df_format.columns = ["province"] + list(df_format.columns)[1:]
for col in list(df_format.columns)[1:]:
df_format[col] = df_format[col].apply(
lambda x: convert_dates_us_policies(x)
)
n_dates = (datetime.now() - datetime(2020, 3, 1)).days + 1
list_all_dates = [
datetime(2020, 3, 1) + timedelta(days=i)
for i in range(n_dates)
]
df_format["province"] = df_format.province.replace({
"District-of-columbia": "District of Columbia",
"New-york": "New York", "North-carolina": "North Carolina",
"North-dakota": "North Dakota", "Rhode-island": "Rhode Island",
"South-carolina": "South Carolina", "South-dakota": "South Dakota",
"West-virginia": "West Virginia", "New-jersey": "New Jersey",
"New-hampshire": "New Hampshire", "New-mexico": "New Mexico",
})
df_to_concat_final = []
for i, province in enumerate(df_format.province.unique()):
df_temp = df_format[
df_format.province == province
].reset_index(drop=True)
columns_to_iter = [
"Mass_Gathering_Restrictions", "Initial_Business_Closure",
"Educational_Facilities_Closed", "Non_Essential_Services_Closed",
"Stay_at_home_order"
]
df_i = pd.DataFrame({
"province": [province for _ in range(n_dates)],
"date": list_all_dates,
"Mass_Gathering_Restrictions": [0 for _ in range(n_dates)],
"Initial_Business_Closure": [0 for _ in range(n_dates)],
"Educational_Facilities_Closed": [0 for _ in range(n_dates)],
"Non_Essential_Services_Closed": [0 for _ in range(n_dates)],
"Stay_at_home_order": [0 for _ in range(n_dates)],
"Travel_severely_limited": [0 for _ in range(n_dates)],
})
date_mgr = df_temp.iloc[0, 1]
date_ibc = df_temp.iloc[0, 2]
date_efc = df_temp.iloc[0, 3]
date_nesc = df_temp.iloc[0, 4]
date_saho = df_temp.iloc[0, 5]
# No date_tsl as no state actually implemented it
for col, date_col in zip(
columns_to_iter,
[date_mgr, date_ibc, date_efc, date_nesc, date_saho]
):
df_i.loc[df_i["date"] >= date_col, col] = 1
df_to_concat_final.append(df_i)
df_final = pd.concat(df_to_concat_final)
df_final.reset_index(drop=True, inplace=True)
output = deepcopy(df_final)
msr = ['No_Measure', 'Restrict_Mass_Gatherings', 'Mass_Gatherings_Authorized_But_Others_Restricted',
'Restrict_Mass_Gatherings_and_Schools', 'Authorize_Schools_but_Restrict_Mass_Gatherings_and_Others',
'Restrict_Mass_Gatherings_and_Schools_and_Others', 'Lockdown']
output['No_Measure'] = (df_final.sum(axis=1) == 0).apply(lambda x: int(x))
output['Restrict_Mass_Gatherings'] = [int(a and b) for a, b in
zip(df_final.sum(axis=1) == 1, df_final['Mass_Gathering_Restrictions'] == 1)]
output['Mass_Gatherings_Authorized_But_Others_Restricted'] = [
int(a and b and c) for a, b, c in zip(
df_final.sum(axis=1) > 0,
df_final['Mass_Gathering_Restrictions'] == 0,
df_final['Stay_at_home_order'] == 0,
)
]
output['Restrict_Mass_Gatherings_and_Schools'] = [
int(a and b and c)
for a, b, c in zip(
df_final.sum(axis=1) == 2,
df_final['Educational_Facilities_Closed'] == 1,
df_final['Mass_Gathering_Restrictions'] == 1,
)
]
output['Authorize_Schools_but_Restrict_Mass_Gatherings_and_Others'] = [
int(a and b and c and d) for a, b, c, d in zip(
df_final.sum(axis=1) > 1,
df_final['Educational_Facilities_Closed'] == 0,
df_final['Mass_Gathering_Restrictions'] == 1,
df_final['Stay_at_home_order'] == 0,
)
]
output['Restrict_Mass_Gatherings_and_Schools_and_Others'] = [
int(a and b and c and d) for a, b, c, d in zip(
df_final.sum(axis=1) > 2,
df_final['Educational_Facilities_Closed'] == 1,
df_final['Mass_Gathering_Restrictions'] == 1,
df_final['Stay_at_home_order'] == 0,
)
]
output['Lockdown'] = (df_final['Stay_at_home_order'] == 1).apply(lambda x: int(x))
output['country'] = "US"
output = output.loc[:, ['country', 'province', 'date'] + msr]
return output
def read_measures_oxford_data():
measures = pd.read_csv('https://ocgptweb.azurewebsites.net/CSVDownload')
filtr = ['CountryName', 'CountryCode', 'Date']
target = ['ConfirmedCases', 'ConfirmedDeaths']
msr = ['S1_School closing',
'S2_Workplace closing', 'S3_Cancel public events',
'S4_Close public transport',
'S5_Public information campaigns',
'S6_Restrictions on internal movement',
'S7_International travel controls', 'S8_Fiscal measures',
'S9_Monetary measures',
'S10_Emergency investment in health care',
'S11_Investment in Vaccines']
measures = measures.loc[:, filtr + msr + target]
measures['Date'] = measures['Date'].apply(lambda x: datetime.strptime(str(x), '%Y%m%d'))
for col in target:
measures[col] = measures[col].fillna(0)
measures = measures.loc[:, measures.isnull().mean() < 0.1]
msr = set(measures.columns).intersection(set(msr))
measures = measures.fillna(0)
for col in msr:
measures[col] = measures[col].apply(lambda x: int(x > 0))
measures = measures[[
'CountryName', 'Date', 'S1_School closing', 'S2_Workplace closing', 'S3_Cancel public events',
'S4_Close public transport', 'S5_Public information campaigns',
'S6_Restrictions on internal movement', 'S7_International travel controls'
]]
measures["CountryName"] = measures.CountryName.replace({
"United States": "US", "South Korea": "Korea, South", "Democratic Republic of Congo": "Congo (Kinshasa)",
"Czech Republic": "Czechia", "Slovak Republic": "Slovakia",
})
return measures
def gamma_t(day, state, params_dic):
dsd, median_day_of_action, rate_of_action = params_dic[state]
t = (day - | pd.to_datetime(dsd) | pandas.to_datetime |
##############################################################################
####################### Libraries
##############################################################################
import pandas as pd
import numpy as np
# from eod_historical_data import (get_api_key,
# get_eod_data,
# get_dividends,
# get_exchange_symbols,
# get_exchanges, get_currencies, get_indexes)
import datetime as dt
import requests_cache
# from io import StringIO
import requests
import io
import yfinance as yf
# from datetime import timedelta, datetime
##############################################################################
################Cache session (to avoid too much data consumption)
##############################################################################
expire_after = dt.timedelta(days=1)
session = requests_cache.CachedSession(cache_name='cache', backend='sqlite',
expire_after=expire_after)
# check the most recent date with available data
inicio = '2020-08-27'
fim = '2020-08-31'
##############################################################################
################## fetch the most recent prices
##############################################################################
# get the list of new dates
prices = yf.download(tickers="MGLU3.SA", start=inicio,end=fim,
rounding=True)[['Open', 'High','Low','Adj Close', 'Volume']].reset_index()
datas = pd.DataFrame(prices['Date'])
datas_lista = datas['Date'].to_list()
# loop to fetch the new prices from EOD
prices = []
for i in range(len(datas_lista)):
url="https://eodhistoricaldata.com/api/eod-bulk-last-day/SA?api_token=60<PASSWORD>1<PASSWORD>.3<PASSWORD>&date={}" .format(datas_lista[i])
r = session.get(url)
s = requests.get(url).content
df_temp = pd.read_csv(io.StringIO(s.decode('utf-8')))
df_temp = df_temp[df_temp.Open.notnull()]
prices.append(df_temp)
prices = | pd.concat(prices) | pandas.concat |
# ----------------------------------------------------------------------------
# File name: NumericalEng.py
#
# Created on: Aug. 11 2020
#
# by <NAME>
#
# Description:
#
# 1) This module to engineer numerical features
#
#
#
# -----------------------------------------------------------------------------
# first load all necessary libraries
import pandas as pd
import numpy as np
import datetime
from sklearn.base import BaseEstimator, TransformerMixin
# Custom transformer to engineer numerical features; the cleaning/engineering steps to apply
# are passed as boolean arguments to its constructor
class NumericalTransformer(BaseEstimator, TransformerMixin):
#Class Constructor
def __init__( self, outlier_1099=True,site0_corr=True, zone_corr=True, floor_cal=True, square_log=True,time_process=True, years_old=True):
self._outlier_1099 = outlier_1099
self._site0_corr = site0_corr
self._zone_corr = zone_corr
self._floor_cal = floor_cal
self._square_log = square_log
self._time_process = time_process
self._years_old = years_old
#Return self, nothing else to do here
def fit( self, X, y = None ):
return self
    #Custom transform method we wrote that creates aforementioned features and drops redundant ones
def transform(self, X, y = None):
#Check if needed
if self._outlier_1099:
# Remove outliers
X = X [ ~(X['building_id'] == 1099) ]
if self._site0_corr:
#X = X.query('not (building_id <= 104 & meter == 0 & timestamp <= "2016-05-20")')
X = X[~((X['building_id']<=104) & (X['meter']==0) & (X['timestamp'] <= "2016-05-20"))]
X.loc[:,'timestamp'] = | pd.to_datetime(X['timestamp']) | pandas.to_datetime |
''' Import modules for reidentification attack'''
import pandas as pd
import numpy as np
import random
import requests
import string
import uuid
import time
from faker import Faker
from datetime import datetime
import scipy.stats as ss
import matplotlib.pyplot as plt
import zipcodes as zc
from tqdm import tqdm
import logging
diseases = {
9:"High Blood Pressure",
8:"Alzheimer",
7:"Heart Disease",
6:"Depression",
5:"Arthritis",
4:"Osteoporosis",
3:"Diabetes",
2:"COPD",
1:"Cancer",
0:"Stroke"
}
disease_numbers = {
9: 0,
8: -1,
7: 1,
6: -2,
5: 2,
4: -3,
3: 3,
2: -4,
1: 4,
0: -5
}
def do_encode(df, cols, diseases):
""" Encodes variables to be compatible with smartnoise
Args:
df = df
cols = columns to be encoded
diseases = dictionary of potential diseases
Returns:
df_enc = new data frame with encoded variables
"""
df_enc = df.copy()
for _ in cols:
if _ == "Diagnosis":
df_enc[f'{_}_encoded'] = df_enc[_].map({v: k for k, v in diseases.items()})
df_enc[f'{_}_encoded'] = df_enc[f'{_}_encoded'].astype(int)
elif _ == "Gender":
df_enc[f'{_}_encoded'] = df_enc[_].replace({"F": 0, "M": 1})
df_enc[f'{_}_encoded'] = df_enc[f'{_}_encoded'].astype(int)
else:
df_enc[f'{_}_encoded'] = df_enc[_].astype(int)
return df_enc[[f'{_}_encoded' for _ in cols]]
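# Illustrative usage sketch (not part of the original module): a tiny frame with the
# columns do_encode expects; the values are made up.
def _example_do_encode():
    df_demo = pd.DataFrame({
        "Gender": ["F", "M"],
        "Diagnosis": ["Cancer", "Diabetes"],
        "Treatment": ["035", "042"],
    })
    return do_encode(df_demo, ["Gender", "Diagnosis", "Treatment"], diseases)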
def get_medical_data(n, lang, disease_numbers, k, logger):
""" Create medical data set
Args:
n = amount of data to be created (non-anonymized)
lang = language for personal data, such as names
k = level of anonymization
logger = pass a custom logger
Returns:
df = returns medical data set
"""
custodian_id = []
age = []
gender = []
zipcode = []
diagnosis = []
treatment = []
severity = []
duplicate_test = 0
fake = Faker(lang)
logging.info('Generating demographic examples')
for n in tqdm(range(n)):
valid=False
while valid == False:
# Generate values and append it to lists
gender_select = random.choice(["M", "F"])
age_select = random.choice(['10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89'])
if lang == "de-DE":
zipcode_select = f"{random.choice([_ for _ in list(zips['zipcode']) if len(str(_)) > 4])[0:3]}**"
else:
zipcode_select = f"{fake.address()[-5:][0:3]}**"
df_temp = pd.DataFrame([gender, age, zipcode]).transpose()
if len(df_temp[(df_temp[0]==gender_select) & (df_temp[1]==age_select) & (df_temp[2]==zipcode_select)]) > 0:
duplicate_test += 1
continue
else:
valid=True
gender.append(gender_select)
zipcode.append(zipcode_select)
age.append(age_select)
custodian_id = [uuid.uuid4().hex for i in range(len(gender) * k)]
treatment = [f'0{str(random.randint(20,50))}' for i in range(len(gender) * k)]
severity = [random.choice(['recovered', 'unchanged', 'intensive care']) for i in range(len(gender) * k)]
if k > 0:
gender = [item for item in gender for i in range(k)]
age = [item for item in age for i in range(k)]
zipcode = [item for item in zipcode for i in range(k)]
diagnosis = assign_ndis(len(zipcode), diseases, disease_numbers, True)
df = | pd.DataFrame([custodian_id, gender, age, zipcode, diagnosis, treatment, severity]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME> - https://www.linkedin.com/in/adamrvfisher/
"""
#BTC strategy model with brute force optimization, need BTC data set to run
#BTC/USD time series can be found for free on Investing.com
#Import modules
import numpy as np
import random as rand
import pandas as pd
import time as t
from pandas import read_csv
#Number of iterations for brute force optimization
iterations = range(0, 2000)
#Can access BTC/USD time series for free on Investing.com
df = read_csv('BTCUSD.csv', sep = ',')
#Variable assignments
Empty = []
Counter = 0
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
#Start timer
Start = t.time()
#Formatting
df = df.set_index('Date')
df = df.iloc[::-1]
df['Adj Close'] = df['Adj Close'].str.replace(',', '')
df['Adj Close'] = pd.to_numeric(df['Adj Close'], errors='coerce')
df['High'] = df['High'].str.replace(',', '')
df['High'] = pd.to_numeric(df['High'], errors='coerce')
df['Open'] = df['Open'].str.replace(',', '')
df['Open'] = pd.to_numeric(df['Open'], errors='coerce')
df['Low'] = df['Low'].str.replace(',', '')
df['Low'] = pd.to_numeric(df['Low'], errors='coerce')
#Return calculation
df['LogRet'] = np.log(df['Adj Close']/df['Adj Close'].shift(1))
df['LogRet'] = df['LogRet'].fillna(0)
#Multiplier calculation
df['BTCmult'] = df['LogRet'].cumsum().apply(np.exp)
#Performance metrics
dailyreturn = df['LogRet'].mean()
dailyvol = df['LogRet'].std()
sharpe =(dailyreturn/dailyvol)
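#The BTCmult line relies on the identity exp(sum(log(p_t / p_(t-1)))) = p_T / p_0;
#quick numerical check with made-up prices (illustration only)
check = pd.Series([100.0, 110.0, 99.0, 120.0])
checkret = np.log(check/check.shift(1)).fillna(0)
checkmult = checkret.cumsum().apply(np.exp)
print(checkmult.iloc[-1])            #~1.2
print(check.iloc[-1]/check.iloc[0])  #1.2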
#Brute force optimization
for i in iterations:
#Hi/Low window
a = rand.randint(4,15)
#Threshold
b = .85 + rand.random() * .3
df['ndayhi'] = df['Adj Close'].shift(1).rolling(window = a).max()
df['ndaylo'] = df['Adj Close'].shift(1).rolling(window = a).min()
#If Adj Close higher than scaled Nday High, then long otherwise flat
df['Signal'] = np.where(df['Adj Close'].shift(1) > (df['ndayhi'].shift(1) * b), 1, 0)
#Apply returns to position
df['Pass'] = df['Signal'].shift(1) * df['LogRet']
#Strategy returns on $1
df['StratReturns'] = df['Pass'].cumsum().apply(np.exp)
#Performance metrics
stratdailyreturn = df['Pass'].mean()
stratdailyvol = df['Pass'].std()
#Iteration counter
Counter = Counter + 1
#Performance constraint
if stratdailyvol == 0:
continue
#Performance metrics
stratsharpe =(stratdailyreturn/stratdailyvol)
#Performance constraint
if stratsharpe < sharpe:
continue
    #Max drawdown calculation (on the strategy's cumulative multiplier)
    MultiplierMax = df['StratReturns'].cummax()
    Drawdown = (df['StratReturns']/MultiplierMax) - 1
    Drawdown = Drawdown.fillna(0)
    MaxDD = abs(min(Drawdown.cummin()))
#Counter display
print(Counter)
#Save params and metrics
Empty.append(a)
Empty.append(b)
Empty.append(stratsharpe)
Empty.append(stratsharpe/MaxDD)
Empty.append(stratdailyreturn/MaxDD)
Empty.append(MaxDD)
#List to Series
    Emptyseries = pd.Series(Empty)
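#--- Standalone illustration (sits outside the optimization loop) -------------
#Toy check of the n-day-high breakout signal used above, with invented prices,
#window a = 3 and threshold b = 0.95
demo = pd.DataFrame({'Adj Close': [10.0, 11.0, 12.0, 11.5, 13.0, 12.0, 14.0]})
demo['ndayhi'] = demo['Adj Close'].shift(1).rolling(window = 3).max()
demo['Signal'] = np.where(demo['Adj Close'].shift(1) > (demo['ndayhi'].shift(1) * 0.95), 1, 0)
print(demo)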
import pandas as pd
import numpy as np
import pytest
import unittest
import datetime
import sys
import context
from fastbt.utils import *
def equation(a,b,c,x,y):
return a*x**2 + b*y + c
def test_multiargs_simple():
seq = pd.Series([equation(1,2,3,4,y) for y in range(20, 30)]).sort_index()
seq.index = range(20,30)
constants = {'a':1, 'b':2, 'c':3, 'x':4}
variables = {'y': range(20, 30)}
par = multi_args(equation, constants=constants, variables=variables).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert (x,) == y
def test_multiargs_product():
seq = []
for x in range(0,10):
for y in range(10,15):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 10), range(10, 15)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 10), 'y': range(10,15)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_multiargs_max_limit():
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True).sort_index()
assert len(par) == 1000
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
@pytest.mark.parametrize("maxLimit", [2000, 3000, 5000, 10000])
def test_multiargs_max_limit_adjust(maxLimit):
seq = []
for x in range(0,100):
for y in range(100, 150):
seq.append(equation(1,2,3,x,y))
index = pd.MultiIndex.from_product([range(0, 100), range(100, 150)])
seq = pd.Series(seq)
seq.index = index
seq = seq.sort_index()
constants = {'a':1, 'b':2, 'c':3}
variables = {'x': range(0, 100), 'y': range(100,150)}
par = multi_args(equation, constants=constants,
variables=variables, isProduct=True, maxLimit=maxLimit).sort_index()
assert len(par) == min(maxLimit, 5000)
assert len(seq) == 5000
# Check both values and indexes
for x,y in zip(seq, par):
assert x == y
for x,y in zip (seq.index, par.index):
assert x == y
def test_tick():
assert tick(112.71) == 112.7
assert tick(112.73) == 112.75
assert tick(1054.85, tick_size=0.1) == 1054.8
assert tick(1054.851, tick_size=0.1) == 1054.9
assert tick(104.73, 1) == 105
assert tick(103.2856, 0.01) == 103.29
assert tick(0.007814, 0.001) == 0.008
assert tick(0.00003562, 0.000001) == 0.000036
assert tick(0.000035617, 0.00000002) == 0.00003562
def test_tick_series():
s = pd.Series([100.43, 200.32, 300.32])
result = [100.45, 200.3, 300.3]
for x,y in zip(tick(s), result):
assert x==y
def test_stop_loss():
assert stop_loss(100, 3) == 97
assert stop_loss(100, 3, order='S') == 103
assert stop_loss(1013, 2.5, order='B', tick_size=0.1) == 987.7
    assert stop_loss(100, -3) == 103 # This should be deprecated
assert stop_loss(100, -3, order='S') == 97
def test_stop_loss_error():
with pytest.raises(ValueError):
assert stop_loss(100, 3, 'BS')
def test_stop_loss_series():
p = pd.Series([100.75, 150.63, 180.32])
result = [95.71, 143.1, 171.3]
for x,y in zip(stop_loss(p, 5, tick_size=0.01), result):
assert pytest.approx(x, rel=0.001, abs=0.001) == y
# Test for sell
result = [105.79, 158.16, 189.34]
for x,y in zip(stop_loss(p, 5, order='S', tick_size=0.01), result):
assert pytest.approx(x, rel=0.001, abs=0.001) == y
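
# For intuition, a minimal reference implementation consistent with the stop_loss
# tests above (an illustration only, NOT fastbt's actual code; it reuses the real
# `tick` helper imported from fastbt.utils):
def _stop_loss_reference(price, pct, order='B', tick_size=0.05):
    # Buy orders place the stop below the entry price, sell orders above it
    sign = -1 if order == 'B' else 1
    return tick(price * (1 + sign * pct / 100), tick_size)

assert _stop_loss_reference(100, 3) == pytest.approx(97)
assert _stop_loss_reference(100, 3, order='S') == pytest.approx(103)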
def test_create_orders_simple():
df = pd.DataFrame(np.arange(20).reshape(5,4), columns=list('ABCD'))
orders = create_orders(df, {'A': 'one', 'B': 'two', 'C': 'three', 'D': 'four'},
exchange='NSE', num=range(5))
df['exchange'] = 'NSE'
df['num'] = [0,1,2,3,4]
assert list(orders.columns) == ['one', 'two', 'three', 'four', 'exchange', 'num']
assert list(df.exchange) == ['NSE'] * 5
class TestRecursiveMerge(unittest.TestCase):
def setUp(self):
df1 = pd.DataFrame(np.random.randn(6,3), columns=list('ABC'))
df2 = pd.DataFrame(np.random.randn(10,3), columns=list('DEF'))
df3 = pd.DataFrame(np.random.randn(7,4), columns=list('GHIJ'))
df4 = pd.DataFrame(np.random.randn(10,7), columns=list('AMNDXYZ'))
df1['idx'] = range(100,106)
df2['idx'] = range(100, 110)
df3['idx'] = range(100, 107)
df4['idx'] = range(100, 110)
self.dfs = [df1, df2, df3, df4]
def test_recursive_merge_simple(self):
df = recursive_merge(self.dfs)
assert len(df) == 6
assert df.shape == (6, 21)
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
assert df.iloc[2, 11] == self.dfs[2].iloc[2, 3]
def test_recursive_on(self):
df = recursive_merge(self.dfs, on=['idx'])
assert df.shape == (6, 18)
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
assert df.iloc[2, 11] == self.dfs[3].iloc[2, 0]
    def test_recursive_on_with_columns(self):
dct = {'1': 'D', '2': 'G', '3': 'X'}
df = recursive_merge(self.dfs, on=['idx'], columns=dct)
assert df.shape == (6, 7)
assert list(sorted(df.columns)) == ['A', 'B', 'C', 'D', 'G', 'X', 'idx']
assert df.loc[3, 'X'] == self.dfs[3].loc[3, 'X']
def test_get_nearest_option():
assert get_nearest_option(23120) == [23100]
assert get_nearest_option(23120, opt='P') == [23100]
assert get_nearest_option(28427, n=3) == [28400, 28500, 28600]
assert get_nearest_option(28400, n=3) == [28400, 28500, 28600]
assert get_nearest_option(28495, n=5, opt='P') == [28400, 28300, 28200, 28100, 28000]
assert get_nearest_option(3000, n=3, step=30) == [3000, 3030, 3060]
def test_calendar_simple():
s,e = '2019-01-01', '2019-01-10'
for a,b in zip(calendar(s,e), pd.bdate_range(s,e)):
assert a == b
for a,b in zip(calendar(s,e,alldays=True), pd.date_range(s,e)):
assert a == b
def test_calendar_holidays():
s,e,h = '2019-01-01', '2019-01-07', ['2019-01-03', '2019-01-07']
bdays = [pd.to_datetime(dt) for dt in [
'2019-01-01', '2019-01-02', '2019-01-04'
]]
for a,b in zip(calendar(s,e,h), bdays):
assert a == b
days = [pd.to_datetime(dt) for dt in [
'2019-01-01', '2019-01-02', '2019-01-04', '2019-01-05', '2019-01-06'
]]
for a,b in zip(calendar(s,e,h,True), days):
assert a == b
def test_calendar_bdate_timestamp():
s,e,st,et = '2019-01-01', '2019-01-01', '04:00', '18:00'
for a,b in zip(calendar(s,e,start_time=st, end_time=et),
pd.date_range('2019-01-01 04:00', '2019-01-01 18:00', freq='H')):
assert a == b
def test_calendar_timestamp_length():
s,e,st = '2019-01-01', '2019-01-01', '04:00'
assert len(calendar(s,e,start_time=st, freq='1min')) == 1200
assert len(calendar(s,e,start_time=st, freq='H')) == 20
et = '16:00'
assert len(calendar(s,e,end_time=et, freq='1min')) == 961
assert len(calendar(s,e,end_time=et, freq='H')) == 17
assert len(calendar(s,e,start_time=st, end_time=et, freq='1min')) == 721
assert len(calendar(s,e,start_time=st, end_time=et, freq='H')) == 13
def test_calendar_timestamp_position():
s,e,st,et = '2019-01-01', '2019-01-04', '10:00', '18:00'
ts = calendar(s,e,start_time=st, end_time=et, freq='1min')
assert str(ts[721]) == '2019-01-02 14:00:00'
assert str(ts[1000]) == '2019-01-03 10:38:00'
def test_calendar_multiple_days():
s,e,st,et = '2019-01-01', '2019-01-10', '10:00:00', '21:59:59'
kwargs = {'start': s, 'end': e, 'start_time': st, 'end_time': et}
holidays = ['2019-01-04', '2019-01-05', '2019-01-06']
assert len(calendar(**kwargs)) == 8
assert len(calendar(alldays=True, **kwargs)) == 10
assert len(calendar(holidays=holidays, alldays=True, **kwargs)) == 7
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='H')) == 7*12
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='10min')) == 7*12*6
assert len(calendar(holidays=holidays, alldays=True, **kwargs, freq='s')) == 7*12*3600
class TestGetOHLCIntraday(unittest.TestCase):
def setUp(self):
timestamp = pd.date_range('2019-01-01', freq='15min', periods=480)
dfs = []
for i,s in zip(range(1,4), ['A', 'B', 'C']):
            df = pd.DataFrame()
from argparse import ArgumentParser
from pathlib import Path
import pandas as pd
from tqdm.auto import tqdm
def run() -> None:
parser = ArgumentParser()
parser.add_argument('input_file', type=Path)
parser.add_argument('output_file', type=Path)
parser.add_argument('--no-finding-class', type=int, default=14)
parser.add_argument('--add-fake-box-for-no-findings', action='store_true')
parser.add_argument('--threshold', type=float, default=0.5)
args = parser.parse_args()
train_data = pd.read_csv(args.input_file)
image_ids = pd.unique(train_data['image_id'])
new_image_info = []
for image_id in tqdm(image_ids):
image_info = train_data[train_data['image_id'] == image_id].reset_index()
no_finding_image_info = image_info[image_info['class_id'] == args.no_finding_class]
if len(no_finding_image_info) > 1:
if len(image_info) != len(no_finding_image_info):
raise ValueError(f'Image {image_id} has both finding and no findings')
image_info = image_info.drop(columns='rad_id').drop_duplicates(['image_id'])
if args.add_fake_box_for_no_findings:
image_info['x_min'] = 0.0
image_info['y_min'] = 0.0
image_info['x_max'] = 1.0
image_info['y_max'] = 1.0
image_info_averaged = averageCoordinates(image_info, args.threshold)
new_image_info.append(image_info_averaged)
train_with_averaged_coordinates = pd.concat(new_image_info)
train_with_averaged_coordinates.to_csv(args.output_file, index=False)
# Code below is taken from https://etrain.xyz/en/posts/vinbigdata-chest-x-ray-abnormalities-detection
# Only modification is that I don't use width and height info
def bb_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
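
# Quick, hand-checked sanity example of bb_iou (coordinates are invented):
# boxes [0, 0, 10, 10] and [5, 5, 15, 15] overlap in a 6x6 patch (the +1 terms
# treat coordinates as inclusive pixel indices), so IoU = 36 / (121 + 121 - 36).
assert abs(bb_iou([0, 0, 10, 10], [5, 5, 15, 15]) - 36 / 206) < 1e-9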
def averageCoordinates(df, threshold):
tmp_df = df.reset_index()
duplicate = {}
for index1, row1 in tmp_df.iterrows():
if index1 < len(tmp_df) - 1:
next_index = index1 + 1
for index2, row2 in tmp_df.loc[next_index:,:].iterrows():
if row1["class_id"] == row2["class_id"]:
boxA = [row1['x_min'], row1['y_min'], row1['x_max'], row1['y_max']]
boxB = [row2['x_min'], row2['y_min'], row2['x_max'], row2['y_max']]
iou = bb_iou(boxA, boxB)
if iou > threshold:
if row1["index"] not in duplicate:
duplicate[row1["index"]] = []
duplicate[row1["index"]].append(row2["index"])
remove_keys = []
for k in duplicate:
for i in duplicate[k]:
if i in duplicate:
for id in duplicate[i]:
if id not in duplicate[k]:
duplicate[k].append(id)
if i not in remove_keys:
remove_keys.append(i)
for i in remove_keys:
del duplicate[i]
rows = []
removed_index = []
for k in duplicate:
row = tmp_df[tmp_df['index'] == k].iloc[0]
X_min = [row['x_min']]
X_max = [row['x_max']]
Y_min = [row['y_min']]
Y_max = [row['y_max']]
removed_index.append(k)
for i in duplicate[k]:
removed_index.append(i)
row = tmp_df[tmp_df['index'] == i].iloc[0]
X_min.append(row['x_min'])
X_max.append(row['x_max'])
Y_min.append(row['y_min'])
Y_max.append(row['y_max'])
X_min_avg = sum(X_min) / len(X_min)
X_max_avg = sum(X_max) / len(X_max)
Y_min_avg = sum(Y_min) / len(Y_min)
Y_max_avg = sum(Y_max) / len(Y_max)
new_row = [row['image_id'], row['class_name'], row['class_id'], X_min_avg, Y_min_avg, X_max_avg, Y_max_avg]
rows.append(new_row)
for index, row in tmp_df.iterrows():
if row['index'] not in removed_index:
new_row = [row['image_id'], row['class_name'], row['class_id'], row['x_min'], row['y_min'], row['x_max'], row['y_max']]
rows.append(new_row)
    new_df = pd.DataFrame(rows, columns=['image_id', 'class_name', 'class_id', 'x_min', 'y_min', 'x_max', 'y_max'])
    return new_df


# Run the CLI entry point only after all helper functions above are defined
if __name__ == '__main__':
    run()