prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars)
---|---|---|
from ...cg.shapes import asShape as pShape
from ...common import requires as _requires
from warnings import warn
@_requires("geopandas")
def to_df(df, geom_col="geometry", **kw):
"""Convert a ``geopandas.GeoDataFrame`` into a normal
``pandas.DataFrame`` with a column containing PySAL shapes.
Parameters
----------
df : geopandas.GeoDataFrame
A ``geopandas.GeoDataFrame`` (or ``pandas.DataFrame``)
with a column containing geo-interfaced shapes.
geom_col : str
The column name in ``df`` that contains the geometry. Default is ``'geometry'``.
**kw : dict
Optional keyword arguments for ``pandas.DataFrame()``.
Returns
-------
df : pandas.DataFrame
The data converted into a ``pandas.DataFrame`` object.
See Also
--------
pandas.DataFrame
"""
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
df[geom_col] = df[geom_col].apply(pShape)
if isinstance(df, (GeoDataFrame, GeoSeries)):
df = pd.DataFrame(df, **kw)
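# Hedged usage sketch (added, not from the original module). It assumes to_df returns the
# converted frame and that a GeoDataFrame with a 'geometry' column is available:
#
#     import geopandas
#     gdf = geopandas.read_file("some_polygons.shp")  # hypothetical input file
#     plain_df = to_df(gdf)
#     # plain_df is a pandas.DataFrame whose 'geometry' column now holds PySAL shapes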
import pandas as pd
num_of_parallel_requests = 5
period = 5.0
class RequestQueue:
def __init__(self):
self.items = pd.DataFrame(columns=["id", "timestamp", "shard", "load", "expected_end_time", "actual_end_time"])
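# Hedged sketch (assumption, not part of the original snippet): one way a request row could
# be appended to the `items` DataFrame defined above, using the same column order.
#
#     def enqueue(self, request_id, shard, load, now):
#         self.items.loc[len(self.items)] = [request_id, now, shard, load, now + period, None]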
'''
The analysis module
Handles the analyses of the info and data space for experiment evaluation and design.
'''
from slm_lab.agent import AGENT_DATA_NAMES
from slm_lab.env import ENV_DATA_NAMES
from slm_lab.lib import logger, math_util, util, viz
from slm_lab.spec import spec_util
import numpy as np
import os
import pandas as pd
import pydash as ps
import regex as re
import shutil
FITNESS_COLS = ['strength', 'speed', 'stability', 'consistency']
# TODO improve to make it work with any reward mean
FITNESS_STD = util.read('slm_lab/spec/_fitness_std.json')
NOISE_WINDOW = 0.05
NORM_ORDER = 1 # use L1 norm in fitness vector norm
MA_WINDOW = 100
logger = logger.get_logger(__name__)
'''
Fitness analysis
'''
def calc_strength_sr(aeb_df, rand_reward, std_reward):
'''
Calculate strength for each reward as
strength = (reward - rand_reward) / (std_reward - rand_reward)
'''
return (aeb_df['reward'] - rand_reward) / (std_reward - rand_reward)
def calc_strength(aeb_df):
'''
Strength of an agent in fitness is its maximum strength_ma. Moving average is used to denoise signal.
For an agent total reward at a time, calculate strength by normalizing it with a given baseline rand_reward and solution std_reward, i.e.
strength = (reward - rand_reward) / (std_reward - rand_reward)
**Properties:**
- random agent has strength 0, standard agent has strength 1.
- strength is standardized to be independent of the actual sign and scale of raw reward
- scales relative to std_reward: if an agent achieves 2x std_reward, the strength is 2x, and so on.
This allows for standard comparison between agents on the same problem using an intuitive measurement of strength. With proper scaling by a difficulty factor, we can compare across problems of different difficulties.
'''
return aeb_df['strength_ma'].max()
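# Worked example (illustrative numbers only): with rand_reward=0 and std_reward=100, a
# reward of 200 gives strength = (200 - 0) / (100 - 0) = 2.0, i.e. twice the standard agent,
# matching the properties listed in the docstring above.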
def calc_speed(aeb_df, std_timestep):
'''
Find the maximum strength_ma, and the time to first reach it. Then the strength/time divided by the standard std_strength/std_timestep is speed, i.e.
speed = (max_strength_ma / timestep_to_first_reach) / (std_strength / std_timestep)
**Properties:**
- random agent has speed 0, standard agent has speed 1.
- if both agents reach the same max strength_ma, and one reaches it in half the timesteps, it is twice as fast.
- speed is standardized regardless of the scaling of absolute timesteps, or even the max strength attained
This allows an intuitive measurement of learning speed and the standard comparison between agents on the same problem.
'''
first_max_idx = aeb_df['strength_ma'].idxmax() # this returns the first max
max_row = aeb_df.loc[first_max_idx]
std_strength = 1.
if max_row['total_t'] == 0: # especially for random agent
speed = 0.
else:
speed = (max_row['strength_ma'] / max_row['total_t']) / (std_strength / std_timestep)
return speed
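# Worked example (illustrative numbers only): with std_timestep=100, an agent that first
# reaches max strength_ma=1.0 at total_t=50 gets speed = (1.0 / 50) / (1.0 / 100) = 2.0,
# i.e. twice as fast as the standard agent.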
def calc_stability(aeb_df):
'''
Stability = fraction of monotonically increasing elements in the denoised series of strength_ma, or 0 if strength_ma is all <= 0.
**Properties:**
- stable agent has value 1, unstable agent < 1, and non-solution = 0.
- uses strength_ma to be more robust to noise
- sharp gain in strength is considered stable
- monotonically increasing implies strength can keep growing; as long as it does not fall much, it is considered stable
'''
if (aeb_df['strength_ma'].values <= 0.).all():
stability = 0.
else:
mono_inc_sr = np.diff(aeb_df['strength_ma']) >= 0.
stability = mono_inc_sr.sum() / mono_inc_sr.size
return stability
def calc_consistency(aeb_fitness_df):
'''
Calculate the consistency of trial by the fitness_vectors of its sessions:
consistency = ratio of non-outlier vectors
**Properties:**
- outliers are calculated using MAD modified z-score
- if all the fitness vectors are zero or all strength are zero, consistency = 0
- works for all sorts of session fitness vectors, with the standard scale
When an agent fails to achieve standard strength, it is meaningless to measure consistency or give false interpolation, so consistency is 0.
'''
fitness_vecs = aeb_fitness_df.values
if ~np.any(fitness_vecs) or ~np.any(aeb_fitness_df['strength']):
# no consistency if vectors all 0
consistency = 0.
elif len(fitness_vecs) == 2:
# if only has 2 vectors, check norm_diff
diff_norm = np.linalg.norm(np.diff(fitness_vecs, axis=0), NORM_ORDER) / np.linalg.norm(np.ones(len(fitness_vecs[0])), NORM_ORDER)
consistency = diff_norm <= NOISE_WINDOW
else:
is_outlier_arr = math_util.is_outlier(fitness_vecs)
consistency = (~is_outlier_arr).sum() / len(is_outlier_arr)
return consistency
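# Hedged sketch of the MAD-based modified z-score outlier test referenced in the
# calc_consistency docstring. The actual implementation is math_util.is_outlier in slm_lab
# and may differ (e.g. it may guard against a zero MAD); this only illustrates the standard
# Iglewicz-Hoaglin formulation.
def _is_outlier_sketch(points, thres=3.5):
    points = np.asarray(points)
    if points.ndim == 1:
        points = points[:, None]
    median = np.median(points, axis=0)
    diff = np.sqrt(np.sum((points - median) ** 2, axis=-1))
    mad = np.median(diff)  # median absolute deviation of the distances to the median
    modified_z_score = 0.6745 * diff / mad  # 0.6745 rescales MAD to a normal std estimate
    return modified_z_score > thres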
def calc_epi_reward_ma(aeb_df, ckpt=None):
'''Calculates the episode reward moving average with the MA_WINDOW'''
rewards = aeb_df['reward']
if ckpt == 'eval':
# online eval mode reward is reward_ma from avg
aeb_df['reward_ma'] = rewards
else:
aeb_df['reward_ma'] = rewards.rolling(window=MA_WINDOW, min_periods=0, center=False).mean()
return aeb_df
def calc_fitness(fitness_vec):
'''
Takes a vector of qualifying standardized dimensions of fitness and computes the normalized length as fitness.
Use the L1 norm for simplicity and the intuitiveness of linearity.
'''
if isinstance(fitness_vec, pd.Series):
fitness_vec = fitness_vec.values
elif isinstance(fitness_vec, pd.DataFrame):
fitness_vec = fitness_vec.iloc[0].values
std_fitness_vector = np.ones(len(fitness_vec))
fitness = np.linalg.norm(fitness_vec, NORM_ORDER) / np.linalg.norm(std_fitness_vector, NORM_ORDER)
return fitness
def calc_aeb_fitness_sr(aeb_df, env_name):
'''Top level method to calculate fitness vector for AEB level data (strength, speed, stability)'''
std = FITNESS_STD.get(env_name)
if std is None:
std = FITNESS_STD.get('template')
logger.warn(f'The fitness standard for env {env_name} is not built yet. Contact author. Using a template standard for now.')
# calculate the strength sr and the moving-average (to denoise) first before calculating fitness
aeb_df['strength'] = calc_strength_sr(aeb_df, std['rand_epi_reward'], std['std_epi_reward'])
aeb_df['strength_ma'] = aeb_df['strength'].rolling(MA_WINDOW, min_periods=0, center=False).mean()
strength = calc_strength(aeb_df)
speed = calc_speed(aeb_df, std['std_timestep'])
stability = calc_stability(aeb_df)
aeb_fitness_sr = pd.Series({
'strength': strength, 'speed': speed, 'stability': stability})
return aeb_fitness_sr
'''
Checkpoint and early termination analysis
'''
def get_reward_mas(agent, name='eval_reward_ma'):
'''Return array of the named reward_ma for all of an agent's bodies.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([getattr(body, name) for body in bodies], dtype=np.float16)
def get_std_epi_rewards(agent):
'''Return array of std_epi_reward for each of the environments.'''
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
return np.array([ps.get(FITNESS_STD, f'{body.env.name}.std_epi_reward') for body in bodies], dtype=np.float16)
def new_best(agent):
'''Check if algorithm is now the new best result, then update the new best'''
best_reward_mas = get_reward_mas(agent, 'best_reward_ma')
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
best = (eval_reward_mas >= best_reward_mas).all()
if best:
bodies = getattr(agent, 'nanflat_body_a', [agent.body])
for body in bodies:
body.best_reward_ma = body.eval_reward_ma
return best
def all_solved(agent):
'''Check if envs have all been solved using std from slm_lab/spec/_fitness_std.json'''
eval_reward_mas = get_reward_mas(agent, 'eval_reward_ma')
std_epi_rewards = get_std_epi_rewards(agent)
solved = (
not np.isnan(std_epi_rewards).any() and
(eval_reward_mas >= std_epi_rewards).all()
)
return solved
def is_unfit(fitness_df, session):
'''Check if a fitness_df is unfit. Used to determine if a trial should stop running more sessions'''
if FITNESS_STD.get(session.spec['env'][0]['name']) is None:
return False # fitness not known
mean_fitness_df = calc_mean_fitness(fitness_df)
return mean_fitness_df['strength'].iloc[0] <= NOISE_WINDOW
'''
Analysis interface methods
'''
def save_spec(spec, info_space, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, info_space, unit)
util.write(spec, f'{prepath}_spec.json')
def calc_mean_fitness(fitness_df):
'''Method to calculate the mean over all bodies for a fitness_df'''
return fitness_df.mean(axis=1, level=3)
def get_session_data(session, body_df_kind='eval', tmp_space_session_sub=False):
'''
Gather data from session from all the bodies
Depending on body_df_kind, will use eval_df or train_df
'''
session_data = {}
for aeb, body in util.ndenumerate_nonan(session.aeb_space.body_space.data):
aeb_df = body.eval_df if body_df_kind == 'eval' else body.train_df
# TODO tmp substitution since SpaceSession does not have run_eval_episode yet
if tmp_space_session_sub:
aeb_df = body.train_df
session_data[aeb] = aeb_df.copy()
return session_data
def calc_session_fitness_df(session, session_data):
'''Calculate the session fitness df'''
session_fitness_data = {}
for aeb in session_data:
aeb_df = session_data[aeb]
aeb_df = calc_epi_reward_ma(aeb_df, ps.get(session.info_space, 'ckpt'))
util.downcast_float32(aeb_df)
body = session.aeb_space.body_space.data[aeb]
aeb_fitness_sr = calc_aeb_fitness_sr(aeb_df, body.env.name)
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[session.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS[:3], axis=1)
session_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
session_fitness_df = pd.concat(session_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(session_fitness_df)
session_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Session mean fitness: {session_fitness}\n{mean_fitness_df}')
return session_fitness_df
def calc_trial_fitness_df(trial):
'''
Calculate the trial fitness df by aggregating from the collected session_data_dict (session_fitness_df's).
Adds a consistency dimension to fitness vector.
'''
trial_fitness_data = {}
try:
all_session_fitness_df = pd.concat(list(trial.session_data_dict.values()))
except ValueError as e:
logger.exception('Sessions failed, no data to analyze. Check stack trace above')
for aeb in util.get_df_aeb_list(all_session_fitness_df):
aeb_fitness_df = all_session_fitness_df.loc[:, aeb]
aeb_fitness_sr = aeb_fitness_df.mean()
consistency = calc_consistency(aeb_fitness_df)
aeb_fitness_sr = aeb_fitness_sr.append(pd.Series({'consistency': consistency}))
aeb_fitness_df = pd.DataFrame([aeb_fitness_sr], index=[trial.index])
aeb_fitness_df = aeb_fitness_df.reindex(FITNESS_COLS, axis=1)
trial_fitness_data[aeb] = aeb_fitness_df
# form multi_index df, then take mean across all bodies
trial_fitness_df = pd.concat(trial_fitness_data, axis=1)
mean_fitness_df = calc_mean_fitness(trial_fitness_df)
trial_fitness_df = mean_fitness_df
trial_fitness = calc_fitness(mean_fitness_df)
logger.info(f'Trial mean fitness: {trial_fitness}\n{mean_fitness_df}')
return trial_fitness_df
def plot_session(session_spec, info_space, session_data):
'''Plot the session graph, 2 panes: reward, loss & explore_var. Each aeb_df gets its own color'''
max_tick_unit = ps.get(session_spec, 'meta.max_tick_unit')
aeb_count = len(session_data)
palette = viz.get_palette(aeb_count)
fig = viz.tools.make_subplots(rows=3, cols=1, shared_xaxes=True, print_grid=False)
for idx, (a, e, b) in enumerate(session_data):
aeb_str = f'{a}{e}{b}'
aeb_df = session_data[(a, e, b)]
aeb_df.fillna(0, inplace=True) # for saving the plot, can't have NaN
fig_1 = viz.plot_line(aeb_df, 'reward_ma', max_tick_unit, legend_name=aeb_str, draw=False, trace_kwargs={'legendgroup': aeb_str, 'line': {'color': palette[idx]}})
fig.append_trace(fig_1.data[0], 1, 1)
fig_2 = viz.plot_line(aeb_df, ['loss'], max_tick_unit, y2_col=['explore_var'], trace_kwargs={'legendgroup': aeb_str, 'showlegend': False, 'line': {'color': palette[idx]}}, draw=False)
fig.append_trace(fig_2.data[0], 2, 1)
fig.append_trace(fig_2.data[1], 3, 1)
fig.layout['xaxis1'].update(title=max_tick_unit, zerolinewidth=1)
fig.layout['yaxis1'].update(fig_1.layout['yaxis'])
fig.layout['yaxis1'].update(domain=[0.55, 1])
fig.layout['yaxis2'].update(fig_2.layout['yaxis'])
fig.layout['yaxis2'].update(showgrid=False, domain=[0, 0.45])
fig.layout['yaxis3'].update(fig_2.layout['yaxis2'])
fig.layout['yaxis3'].update(overlaying='y2', anchor='x2')
fig.layout.update(ps.pick(fig_1.layout, ['legend']))
fig.layout.update(title=f'session graph: {session_spec["name"]} t{info_space.get("trial")} s{info_space.get("session")}', width=500, height=600)
viz.plot(fig)
return fig
def gather_aeb_rewards_df(aeb, session_datas, max_tick_unit):
'''Gather rewards from each session for a body into a df'''
aeb_session_rewards = {}
for s, session_data in session_datas.items():
aeb_df = session_data[aeb]
aeb_reward_sr = aeb_df['reward_ma']
aeb_reward_sr.index = aeb_df[max_tick_unit]
# guard for duplicate eval result
aeb_reward_sr = aeb_reward_sr[~aeb_reward_sr.index.duplicated()]
if util.in_eval_lab_modes():
# guard for eval appending possibly not ordered
aeb_reward_sr.sort_index(inplace=True)
aeb_session_rewards[s] = aeb_reward_sr
aeb_rewards_df = pd.DataFrame(aeb_session_rewards)
return aeb_rewards_df
def build_aeb_reward_fig(aeb_rewards_df, aeb_str, color, max_tick_unit):
'''Build the aeb_reward envelope figure'''
mean_sr = aeb_rewards_df.mean(axis=1)
std_sr = aeb_rewards_df.std(axis=1).fillna(0)
max_sr = mean_sr + std_sr
min_sr = mean_sr - std_sr
x = aeb_rewards_df.index.tolist()
max_y = max_sr.tolist()
min_y = min_sr.tolist()
envelope_trace = viz.go.Scatter(
x=x + x[::-1],
y=max_y + min_y[::-1],
fill='tozerox',
fillcolor=viz.lower_opacity(color, 0.2),
line=dict(color='rgba(0, 0, 0, 0)'),
showlegend=False,
legendgroup=aeb_str,
)
df = pd.DataFrame({max_tick_unit: x, 'mean_reward': mean_sr})
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
# we are doing simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
),
},
columns=["A", "C", "B"],
)
expected1 = DataFrame(
{"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
columns=["A", "C", "B"],
)
result = df.rolling("2s", on="C").sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name="B")
result = df.rolling("2s", on="C").B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[["A", "B", "C"]]
result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = date_range("20130101", periods=5, freq="D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="1D").sum()
tm.assert_frame_equal(result, expected)
df.index = date_range("20130101", periods=5, freq="2D")
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window="2D", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window="2D").sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
# these slightly different
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling("2s", min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame(
{"A": [1] * 5},
index=[
Timestamp("20130101 09:00:01"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:04"),
Timestamp("20130101 09:00:06"),
],
)
# closed must be 'right', 'left', 'both', 'neither'
msg = "closed must be 'right', 'left', 'both' or 'neither'"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="2s", closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling("2s", closed="right").sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling("2s").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling("2s", closed="both").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling("2s", closed="left").sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling("2s", closed="neither").sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s").sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="4s", min_periods=3).sum()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).sum()
expected = df.copy()
expected["B"] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).mean()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).median()
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).quantile(0.5)
expected = df.copy()
expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).std(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).std(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="1s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="3s", min_periods=1).var(ddof=0)
expected = df.copy()
expected["B"] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).var(ddof=1)
expected = df.copy()
expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).skew()
expected = df.copy()
expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window="3s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).kurt()
expected = df.copy()
expected["B"] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window="1s").count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).count()
expected = df.copy()
expected["B"] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=2).count()
expected = df.copy()
expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
).set_index("A")
result = df.rolling("1s").min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]}
).set_index("A")
tm.assert_frame_equal(result, expected)
result = df.rolling("2s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling("5s").min()
expected = df.copy()
expected["B"] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).min()
expected = df.copy()
expected["B"] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame(
{"B": np.random.randn(N)}, index=date_range("20130101", periods=N, freq="s")
)
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling("2s").min()
assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling("200s").min()
assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
df = self.ragged
result = df.rolling(window="1s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).max()
expected = df.copy()
expected["B"] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"freq, op, result_data",
[
("ms", "min", [0.0] * 10),
("ms", "mean", [0.0] * 9 + [2.0 / 9]),
("ms", "max", [0.0] * 9 + [2.0]),
("s", "min", [0.0] * 10),
("s", "mean", [0.0] * 9 + [2.0 / 9]),
("s", "max", [0.0] * 9 + [2.0]),
("min", "min", [0.0] * 10),
("min", "mean", [0.0] * 9 + [2.0 / 9]),
("min", "max", [0.0] * 9 + [2.0]),
("h", "min", [0.0] * 10),
("h", "mean", [0.0] * 9 + [2.0 / 9]),
("h", "max", [0.0] * 9 + [2.0]),
("D", "min", [0.0] * 10),
("D", "mean", [0.0] * 9 + [2.0 / 9]),
("D", "max", [0.0] * 9 + [2.0]),
],
)
def test_freqs_ops(self, freq, op, result_data):
# GH 21096
index = date_range(start="2018-1-1 01:00:00", freq=f"1{freq}", periods=10)
s = Series(data=0, index=index)
s.iloc[1] = np.nan
s.iloc[-1] = 2
result = getattr(s.rolling(window=f"10{freq}"), op)()
expected = Series(data=result_data, index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
pytest.param(
"count",
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
"median",
"std",
"var",
"kurt",
"skew",
"min",
"max",
],
)
def test_all(self, f):
# simple comparison of integer vs time-based windowing
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window="1s")
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
"count",
"median",
"std",
"var",
"kurt",
"skew",
"min",
"max",
],
)
def test_all2(self, f):
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame(
{"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="H")
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window="5H")
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = (
df.groupby(df.index.day).apply(agg_by_day).reset_index(level=0, drop=True)
)
tm.assert_frame_equal(result, expected)
def test_groupby_monotonic(self):
# GH 15130
# we don't need to validate monotonicity when grouping
data = [
["David", "1/1/2015", 100],
["David", "1/5/2015", 500],
["David", "5/30/2015", 50],
["David", "7/25/2015", 50],
["Ryan", "1/4/2014", 100],
["Ryan", "1/19/2015", 500],
["Ryan", "3/31/2016", 50],
["Joe", "7/1/2015", 100],
["Joe", "9/9/2015", 500],
["Joe", "10/15/2015", 50],
]
df = DataFrame(data=data, columns=["name", "date", "amount"])
df["date"] = to_datetime(df["date"])
expected = (
df.set_index("date")
.groupby("name")
.apply(lambda x: x.rolling("180D")["amount"].sum())
)
result = df.groupby("name").rolling("180D", on="date")["amount"].sum()
tm.assert_series_equal(result, expected)
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
dates = date_range(start="2016-01-01 09:30:00", periods=20, freq="s")
df = DataFrame(
{
"A": [1] * 20 + [2] * 12 + [3] * 8,
"B": np.concatenate((dates, dates)),
"C": np.arange(40),
}
)
result = df.groupby("A").rolling("4s", on="B").C.mean()
expected = (
df.set_index("B").groupby("A").apply(lambda x: x.rolling("4s")["C"].mean())
)
tm.assert_series_equal(result, expected)
df2 = df.sort_values("B")
result = df2.groupby("A").rolling("4s", on="B").C.mean()
tm.assert_series_equal(result, expected)
def test_rolling_cov_offset(self):
# GH16058
idx = date_range("2017-01-01", periods=24, freq="1h")
ss = Series(np.arange(len(idx)), index=idx)
result = ss.rolling("2h").cov()
expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(2, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
result = ss.rolling("3h").cov()
expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(3, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
def test_rolling_on_decreasing_index(self):
# GH-19248, GH-32385
index = [
Timestamp("20190101 09:00:30"),
Timestamp("20190101 09:00:27"),
Timestamp("20190101 09:00:20"),
Timestamp("20190101 09:00:18"),
Timestamp("20190101 09:00:10"),
]
df = DataFrame({"column": [3, 4, 4, 5, 6]}, index=index)
result = df.rolling("5s").min()
expected = DataFrame({"column": [3.0, 3.0, 4.0, 4.0, 6.0]}, index=index)
"""
author: <NAME>
date: 2020-11-27
This script imports the train and test CSVs from the processed data folder and performs machine learning modelling and analysis.
Usage: machine_learning_analysis.py --in_train=<in_train> --in_test=<in_test> --out_path=<out_path>
Options:
--in_train=<in_train> path including filename of the input train data file to process (this is a required option)
--in_test=<in_test> path including filename of the input test data file to process (this is a required option)
--out_path=<out_path> path to where the figures and tables will be written to (this is a required option)
Example:
python machine_learning_analysis.py --in_train="../data/processed/bank-additional-full_train.csv" --in_test="../data/processed/bank-additional-full_test.csv" --out_path="../results/"
"""
#Standards
import os
import numpy as np
import pandas as pd
import string
from collections import deque
from docopt import docopt
#Plots
import matplotlib.pyplot as plt
import altair as alt
import seaborn as sns
# classifiers / models
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, RidgeCV, RidgeClassifier
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC, SVR
# Data, preprocessing and pipeline
#Pro
from sklearn import datasets
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import (
OneHotEncoder,
OrdinalEncoder,
PolynomialFeatures,
StandardScaler,
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.compose import (
ColumnTransformer,
TransformedTargetRegressor,
make_column_transformer,
)
# metrics for class imbalance
from sklearn.metrics import (
accuracy_score,
classification_report,
confusion_matrix,
plot_confusion_matrix,
f1_score,
make_scorer,
precision_score,
recall_score,
)
# hyperparameter optimization
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
ShuffleSplit,
cross_val_score,
cross_validate,
train_test_split,
)
# other
# ignore warning
import warnings
warnings.filterwarnings('ignore')
opt = docopt(__doc__)
def main(in_train, in_test, out_path):
"""
Take in the data, perform preprocessing and model fitting, generate the analysis figures and tables, and output them to the given path.
Parameters
----------
in_train : string
path including filename of the input train data file to process
in_test : string
path including filename of the input test data file to process
out_path : string
path to where the resulting figures and tables will be written
Example
----------
main("../data/processed/bank-additional-full_train.csv", "../data/processed/bank-additional-full_test.csv", "../result")
"""
# load in data (should be full data before split) ##need update once clean data script is finalized.
train_df = pd.read_csv(in_train, sep=',')
test_df = pd.read_csv(in_test, sep=',')
# load in data (should be full data before split) ## Removed once clean data script is finalized.
# df2 = df.copy()
# df2.loc[df['y'] == 'no', 'target'] = 0
# df2.loc[df['y'] == 'yes', 'target'] = 1
# load in data (should be full data before split) ## need to update in directly taking in train df and test df from data clean sript
# train_df, test_df = train_test_split(df2, test_size = 0.20, random_state=123)
# Define types of features: numeric, categorical, ordinal for now. No drop features ## need update on drop feature after data clean.
numeric_features = ["age", "campaign", "pdays", "previous", "emp.var.rate",
"cons.price.idx", "cons.conf.idx", "euribor3m", "nr.employed", "duration"]
categorical_features = ["job", "poutcome", "month", "day_of_week", "contact","marital", "default", "housing", "loan"]
ordinal_features = ["education"]
education_ordering = ['illiterate', 'basic.4y','basic.6y','basic.9y','high.school',
'professional.course','university.degree', 'unknown']
drop_features = []
target = ["y"]
# drop target for train and test data.
X_train = train_df.drop(columns=target)
y_train = train_df[target]
X_test = test_df.drop(columns=target)
y_test = test_df[target]
# Define preprocessing transformers (preprocessors - column transformer)
numeric_transformer = make_pipeline(
SimpleImputer(strategy="median"),
StandardScaler()
)
ordinal_transformer = make_pipeline(
SimpleImputer(strategy="most_frequent"),
OrdinalEncoder(categories=[education_ordering])
)
categorical_transformer = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore", sparse=False)
)
preprocessor = make_column_transformer(
("drop", drop_features),
(numeric_transformer, numeric_features),
(ordinal_transformer, ordinal_features),
(categorical_transformer, categorical_features)
)
# A function to store mean cross-validation validation values
def store_cross_val_results(model_name, scores, results_dict):
results_dict[model_name] = {
"Accuracy": "{:0.3f}".format(np.mean(scores["test_accuracy"])),
# "mean_fit_time (s)": "{:0.4f}".format(np.mean(scores["fit_time"])), #since it's not critical to get the result within an hour or so, fit and score time would not matter much
# "mean_score_time (s)": "{:0.4f}".format(np.mean(scores["score_time"])),
"Recall": "{:0.3f}".format(np.mean(scores["test_recall"])),
"Precision": "{:0.3f}".format(np.mean(scores["test_precision"])),
"f1": "{:0.3f}".format(np.mean(scores["test_f1"])),
"AP": "{:0.3f}".format(np.mean(scores["test_average_precision"])),
"Roc_Auc": "{:0.3f}".format(np.mean(scores["test_roc_auc"])),
}
# A summary dictionary to store the scores for different models.
results_df = {}
# Define model metrics, fit and score the baseline model: Dummy Classifier
scoring=["accuracy", "f1", "recall", "precision", "average_precision", "roc_auc"]
pipe = make_pipeline(preprocessor, DummyClassifier(strategy="most_frequent"))
scores = cross_validate(pipe, X_train, y_train, return_train_score=True, scoring=scoring)
summary = store_cross_val_results("Dummy", scores, results_df)
pd.DataFrame(results_df)
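# Note (added): since results_df maps model name -> {metric: value}, pd.DataFrame(results_df)
# renders metrics as rows and models as columns; transpose with .T if models-as-rows is preferred.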
import csv
import re
import pandas as pd
from os.path import join, exists
from os import mkdir
from shutil import rmtree
from os import remove as remove_file
from sys import stdout
import zipfile
import ntpath
from .. enums import POSSIBLE_INPUTS, POSSIBLE_COMMANDS, INPUT_DEFAULTS, PATH_TO_STORAGE
from .. sys_functions.find_files_in_folder import find_files
from .. sys_functions.get_inputs import get_path_to_FEB_files, get_optional_input
from .. logger import console_log as log
# define headers
HEADER_PAR = ['a','b']
HEADER_TIM = ['time']
HEADER_STR = ['sx','sy','sz','sxy','sxz','syz']
HEADER_DIS = ['ux','uy','uz']
HEADER_POS = ['x','y','z']
HEADER_FAL = ['fail']
# define last simulation timestamp
LAST_TIMESTAMP = 0.2
def includeNodeNumberInHeader(header):
clock = 0
counter = 0
new_header = []
for _ in range(8):
for i in range(len(header)):
if clock == 0:
counter += 1
new_header.append(header[i] + "_" + str(counter))
clock = clock + 1 if clock < 2 else 0
return new_header
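# Illustrative output (follows from the loop above): for HEADER_POS = ['x', 'y', 'z'] this
# returns ['x_1', 'y_1', 'z_1', 'x_2', 'y_2', 'z_2', ..., 'x_8', 'y_8', 'z_8'],
# i.e. one x/y/z triple per node for the 8 nodes.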
def create_header():
a = HEADER_PAR
a.extend(HEADER_TIM)
a.extend(HEADER_FAL)
a.extend(includeNodeNumberInHeader(HEADER_POS))
a.extend(includeNodeNumberInHeader(HEADER_DIS))
a.extend(HEADER_STR)
return a
def decode_data(file):
data = {}
with open(file, 'r') as datafile:
for line in datafile:
if line.find("*Time") != -1:
time = float(line.split("=")[1])
data[time] = []
elif line.find("*") == -1:
# if line.find(","):
# line = line.replace(",","")
# line.replace("\\n","")
line = re.sub(",", '', line)
line = re.sub("\n", '', line)
str_data = line[2:].split(" ")
data[time].extend([float(s) for s in str_data])
return data
def get_param_val(file):
params = {}
with open(file, 'r',newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for i, row in enumerate(spamreader):
params[i] = [float(v) for v in row[-1].split(";")]
return params
def compile_data(files):
frames = []
total_files = len(files)
for i, (fp, _, _) in enumerate(files):
ndf = pd.read_pickle(fp)
frames.append(ndf)
log.log_message("compiled pickle: ", (i + 1) /total_files)
df = pd.concat(frames, ignore_index=True)
return df
def make_pickle(inputs):
function_name = 'MAKE_PICKLE'
log.log_step("\n== {} ==\n".format(function_name))
# Get optional Inputs
inp_folder = get_optional_input(inputs, 'INPUT_FOLDER', function_name)
out_folder = get_optional_input(inputs, 'OUTPUT_FOLDER', function_name)
inpf_name = ntpath.basename(inp_folder)
temp_folder = join(inp_folder,"tmp")
if not exists(temp_folder):
mkdir(temp_folder)
# zipfile handler
zipf = zipfile.ZipFile(join(inp_folder,'zipped_data.zip'), 'w', zipfile.ZIP_DEFLATED)
files = find_files(inp_folder, ("fileFormat","txt"))
csv_files = find_files(inp_folder, ("fileFormat","csv"))
for (fp, ff, fn) in csv_files:
if fn.find('modified_param_log') != -1:
param_file = (fp, ff, fn)
else:
raise(AssertionError("modified_params_log file not found."))
# -----------------
# organize files:
str_files = {}
dis_files = {}
pos_files = {}
log.log_message("-- Sorting files")
for (fp, ff, fn) in files:
fs = fn.split("_")
if len(fs) == 2:
key = "initial"
else:
key = int(fs[-1])
if fs[0] == 'displacement':
dis_files[key] = (fp, ff, fn)
elif fs[0] == 'stress':
str_files[key] = (fp, ff, fn)
elif fs[0] == 'position':
pos_files[key] = (fp, ff, fn)
files = [str_files, dis_files, pos_files]
# -----------------
# Determining critial values
log.log_message("-- Checking lengths")
lengths = [len(l) for l in files]
idxmin = lengths.index(min(lengths))
baseDict = files[idxmin]
baseDictLength = len(baseDict)
# get param values
log.log_message("-- Getting params")
params = get_param_val(param_file[0])
# -----------------
# create dataframe
log.log_message("-- Creating dataframe")
header = create_header()
df = pd.DataFrame(columns=header)
rowCounter = 0
fileCounter = 0
# -----------------
# creating temporary pickles
log.log_message("-- Filling dataframe")
for i, key in enumerate(baseDict):
if key in params:
str_data = decode_data(str_files[key][0])
dis_data = decode_data(dis_files[key][0])
pos_data = decode_data(pos_files[key][0])
org_keys = sorted(str_data.keys())
if len(org_keys) > 0:
last_timestamp = org_keys[-1]
failed = 1 if float(last_timestamp) != LAST_TIMESTAMP else 0
for time in org_keys:
df.loc[rowCounter] = params[key] + [time] + [failed] + pos_data[time] + dis_data[time] + str_data[time]
rowCounter += 1
stdout.write(".")
stdout.flush()
if i % 100 == 0 and i != 0:
# Log
log.log_message("\nBatch: #{fc} -> {pc}%".format(fc=fileCounter, pc = i/baseDictLength))
df.to_pickle(join(temp_folder,"data%s.pickle" % fileCounter))
# create new instance
df = pd.DataFrame(columns=header)
from random import randint
import numpy as np
import pandas as pd
def cross_validation(data, column_target, k=10):
column_values = data[column_target].value_counts().index # the problem's classes
class_data = [data[data[column_target] == valor] for valor in column_values] # split the instances by class
class_quant = [] # count per class, to keep the proportions
class_remainder = [] # remainder of the division
for data in class_data:
class_quant.append(int(data.shape[0] / k))
class_remainder.append(int(data.shape[0] % k))
folds = [pd.DataFrame(columns=data.columns) for i in range(k)] # initialize the list of folds
for data in class_data:
data.index = range(data.shape[0]) # reindex the tables
for i in range(k): # for each fold
for j in range(len(class_data)): # for each class
for _ in range(class_quant[j]): # up to the required amount
index = randint(0, class_data[j].shape[0]-1) # pick the index
folds[i] = folds[i].append(class_data[j].iloc[index], ignore_index=True) # add the instance to a fold
class_data[j].drop([index], inplace=True) # remove it from the table
class_data[j].index = range(class_data[j].shape[0]) # reindex
if class_remainder[j] > 0:
index = randint(0, class_data[j].shape[0]-1)
folds[i] = folds[i].append(class_data[j].iloc[index], ignore_index=True)
class_data[j].drop([index], inplace=True)
class_data[j].index = range(class_data[j].shape[0])
class_remainder[j] -= 1
new_train_datas = [pd.DataFrame(columns=data.columns) for i in range(k)] # initialize the list of training sets
new_test_datas = [pd.DataFrame(columns=data.columns) for i in range(k)] # initialize the list of test sets
for i in range(k): # create the new data tables using the folds
new_test_datas[i] = new_test_datas[i].append(folds[i], ignore_index=True)
for j in range(k):
if i != j:
new_train_datas[i] = new_train_datas[i].append(folds[j], ignore_index=True)
return (new_train_datas, new_test_datas)
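# Hedged usage sketch (toy data, not from the original project): a stratified 5-fold split
# on a small frame with a binary 'label' column.
#
#     toy = pd.DataFrame({"feat": range(20), "label": [0] * 10 + [1] * 10})
#     train_sets, test_sets = cross_validation(toy, "label", k=5)
#     # train_sets[i] / test_sets[i] are the i-th fold's train and test DataFrames, with the
#     # class proportions of 'label' preserved in each fold.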
def bootstrap(data, k=5):
new_sets = []
for _ in range(k):
train_instances = []
test_instances = []
reg_index = {}
for _ in range(data.shape[0]):
index = randint(0, data.shape[0] - 1)
train_instances.append(data.iloc[index])
reg_index[index] = True
for index in range(data.shape[0]):
if not index in reg_index:
test_instances.append(data.iloc[index])
new_train = pd.DataFrame(train_instances)
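# Hedged usage sketch (assumption: the function continues past this excerpt, building the
# matching test DataFrame from `test_instances` and collecting the pairs in `new_sets`),
# reusing the `toy` frame from the sketch above:
#
#     sets = bootstrap(toy, k=3)
#     # each replicate draws len(toy) rows with replacement for training and keeps the
#     # never-sampled (out-of-bag) rows for testing.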
import pandas as pd
import numpy as np
import math
import os
import time
from DataCleanService.src.main.utils.utils import remove_gz_suffix, remove_gz_suffix_for_condo
from DataCleanService.src.main.config import constants, DataCleanServiceConfig
import glob
# TODO: data format exception (str, float...)
def select_related_rows(df, prefix):
df = df[df['Taxes'] != 0]
if prefix == 'Sold':
df.dropna(subset=['Cd'], inplace=True)
df = df[df['Sp_dol'] > 50000]
# TODO: Remove this constraint
# df['lp/sp'] = abs(df['Lp_dol'] - df['Sp_dol']) / df['Sp_dol']
# df = df[df['lp/sp'] <= 0.3]
# df.drop(columns=['lp/sp'], inplace=True)
if prefix == 'Listing':
df = df[df['Lp_dol'] > 50000]
df.drop(columns=['Sp_dol', 'Cd'], inplace=True)
year, month, day, hour, minute = time.strftime("%Y,%m,%d,%H,%M").split(',')
cur_date = str(year) + '-' + str(month) + '-' + str(day)
df['Cd'] = cur_date
df.index = range(len(df))
return df
def complement_null(df, depth_median, front_median):
df[constants.CMPLMT_NONE_COL] = df[constants.CMPLMT_NONE_COL].fillna(value='None')
df[constants.CMPLMT_ZERO_COL] = df[constants.CMPLMT_ZERO_COL].fillna(value=0)
df['Den_fr'] = df['Den_fr'].fillna(value='N')
# Depth / Front_ft: Condo related cols -> 0 House-related cols -> median
df_cdhs = df[df['Type_own1_out'].isin(constants.CDHS_LABEL)][['Depth', 'Front_ft']]
df_part_hs = df[~df['Type_own1_out'].isin(constants.CDHS_LABEL)][['Depth', 'Front_ft']]
df_cdhs['Depth'] = df_cdhs['Depth'].fillna(value=0)
df_cdhs['Front_ft'] = df_cdhs['Front_ft'].fillna(value=0)
if (depth_median == 0) & (front_median == 0):
depth_median = df_part_hs['Depth'].median()
front_median = df_part_hs['Front_ft'].median()
median = [[depth_median, front_median]]
df_median = pd.DataFrame(median, columns=['depth_median', 'front_median'])
df_median.to_csv(DataCleanServiceConfig.CLEAN_DATA_MEDIAN_FILE, index=None)
df_part_hs['Depth'] = df_part_hs['Depth'].fillna(value=depth_median)
df_part_hs['Front_ft'] = df_part_hs['Front_ft'].fillna(value=front_median)
depth_front = pd.concat([df_cdhs, df_part_hs], ignore_index=False)
df = df.join(depth_front, lsuffix='_x', rsuffix='')
df.drop(columns=['Depth_x', 'Front_ft_x'], inplace=True)
return df
def process_cols(df, comm_list):
# Process Area code
df.Area_code = df.Area_code.astype(str)
df['Area_code'] = df.Area_code.str.extract('(\d+)', expand=True).astype(float)
# Process Garage
df['Garage'] = df['Gar'] + df['Gar_spaces']
df.drop(columns=['Gar', 'Gar_spaces'], inplace=True)
# Process lat & lng
df['lng'] = df['lng'].apply(lambda x: x * (-1))
# Process Community
if comm_list is None:
cm_count = df.Community.value_counts()
cm_h = {cm_count.index[i]: cm_count.values[i] for i in range(len(cm_count.values)) if
cm_count.values[i] > constants.COMM_TH}
selected_cm = [*(cm_h.keys())]
df_comm = pd.DataFrame(selected_cm, columns=['Comm'])
df_comm.to_csv(DataCleanServiceConfig.COMM_FILE, index=None)
else:
selected_cm = comm_list
df.Community.where(df['Community'].isin(selected_cm), 'Other', inplace=True)
return df
def process_date(df):
df['Cd'] = pd.to_datetime(df['Cd'])
df['month'] = df.Cd.dt.month
df.index = range(len(df))
month_dic = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep', 10: 'Oct',
11: 'Nov', 12: 'Dec'}
df_month = pd.DataFrame(0, index=np.arange(len(df)), columns=constants.MONTH)
df = pd.concat([df, df_month], axis=1)
#!/usr/bin/env python
# encoding: utf-8
'''
asreml.Gmatrix -- shortdesc
asreml.Gmatrix is a description
It defines classes_and_methods
@author: user_name
@copyright: 2020 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
import time
import numpy as np
import array as arr
import pandas as pd
import Utils
from optparse import OptionParser
from logging_utils import setup_logging_to_file, log_exception, log_info,log_warn
__all__ = []
__version__ = 0.1
__date__ = '2020-06-02'
__updated__ = '2020-06-02'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
def getMAFvalue(arr, idx):
if arr == [0,0]:
return 0 if idx == 1 else 2
elif arr == [1,1]:
return 2 if idx == 1 else 0
elif arr == [0,1] or arr == [1,0]:
return 1
else:
return ''
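# Illustrative note (added): with idx marking the minor allele (here idx == 1 means allele 1
# is minor), getMAFvalue maps a genotype to its minor-allele dosage, e.g.
# getMAFvalue([0, 0], 1) == 0, getMAFvalue([0, 1], 1) == 1, getMAFvalue([1, 1], 1) == 2,
# and anything else (missing data) returns ''.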
def mafMatrix(infile, outdir, df):
log_info('MAF Matrix')
log_info('Calculate frequencies')
df_freq = pd.DataFrame(columns=['0', '1'], index=df.index)
try:
for index, row in df.iterrows():
freq = [0,0]
count = 0
for val in row.values:
count_0 = np.count_nonzero(np.asarray(val, dtype='i1') == 0)
count_1 = np.count_nonzero(np.asarray(val, dtype='i1') == 1)
count_neg = np.count_nonzero(np.asarray(val, dtype='i1') == -1)
if count_neg > 0:
continue
else:
freq[0] += count_0
freq[1] += count_1
count += 2
df_freq.loc[index] = [freq[0]/count, freq[1]/count]
# gt_freq = gt.count_alleles().to_frequencies()
log_info('Write frequencies')
Utils.saveFile(df_freq, os.path.join(outdir, "freq.alleles.txt"), index=True)
except Exception as e:
log_warn(e)
log_info('Construct MAF matrix')
try:
vector_maf = pd.DataFrame(columns=['MAF'], index=df.index)
for index, row in df.iterrows():
arr = df_freq.loc[index].values
idx = np.argmin(arr)
# idx = np.where(arr == np.amin(arr))
vector_maf.loc[index] = arr[idx]
df.loc[index] = df.loc[index].apply(lambda x: getMAFvalue(x, idx))
log_info('Write MAF matrix')
df = df.transpose()
Utils.saveFile(df, os.path.join(outdir, "matrix.maf.txt"), index=True)
log_info('Write MAF frequencies')
df_maf = pd.DataFrame(columns=df.columns, index=df.index)
for index, row in df.iterrows():
df_maf.loc[index] = list(vector_maf['MAF'].values)
Utils.saveFile(vector_maf, os.path.join(outdir, "freq.maf.txt"), index=True)
Utils.saveFile(df_maf, os.path.join(outdir, "matrix.P.txt"), index=True)
log_info("End MAF")
return df
except Exception as e:
log_warn(e)
def codeGenotype(code):
try:
if code == 1:
return [0, 0]
elif code == 2:
return [0, 1]
elif code == 3:
return [1, 0]
elif code == 4:
return [1, 1]
else:
return [-1,-1]
except Exception as e:
log_warn(e)
def loadrQTL(infile):
log_info('Load rQTL')
files = infile.split(',')
try:
df_final = None
for f in files:
df = Utils.loadFile(f)
df = df.set_index('ID', drop=True)
df = df.iloc[4:]
df = df.astype(dtype='int32')
df = df.applymap(lambda x: codeGenotype(x))
df = df.transpose()
if df_final is None:
df_final = df
else:
df_final = pd.concat([df_final, df], axis=1, sort=False)
return df_final
except Exception as e:
log_exception(e)
def calculateSimilarity(lst0, lst1):
try:
count = 0
for i in range(len(lst0)):
count += 1 if lst0[i] == lst1[i] else 0
return count / len(lst0)
except Exception as e:
log_warn(e)
return 0
def tabulate(df_maf, outdir):
log_info('Construct Relationship Matrix using Tabulation method')
df_grm = pd.DataFrame(columns=df_maf.index, index=df_maf.index)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import library.areamanager as areamanager
import pandas as pd
import json
import time
import collections
import numpy as np
import pickle
import library.cat_utils as cat_utils
import library.geo_utils as geo_utils
from library.parallel_util import run_parallel
from library.constants import geocat_constants,experiment_constants
from tqdm import tqdm
import math
import sklearn
import sklearn.neighbors
SPLIT_YEAR=2017
earth_radius = 6371000/1000 # km in earth
cities=['lasvegas','phoenix','charlotte','madison']
# cities=experiment_constants.CITIES
#cities=['madison']
dict_alias_title,category_tree,dict_alias_depth=cat_utils.cat_structs("../../data/categories.json")
undirected_category_tree=category_tree.to_undirected()
def category_filter(categories):
tmp_cat_list=list()
if categories != None:
for category in categories:
try:
if dict_alias_depth[dict_alias_title[category]] <= 2:
tmp_cat_list.append(dict_alias_title[category])
except:
pass
tmp_cat_list=cat_utils.get_most_detailed_categories(tmp_cat_list,dict_alias_title,dict_alias_depth)
return tmp_cat_list
def category_normalization(categories):
if categories != None:
return categories
else:
return []
TRAIN_SIZE=experiment_constants.TRAIN_SIZE
TEST_SIZE=1-TRAIN_SIZE
# In[2]:
fbusiness=open("../../data/business.json")
poi_data = dict()
start_time=time.time()
for i, line in enumerate(fbusiness):
# json to dict
obj_json = json.loads(line)
# add to the data collection
if obj_json['categories'] != None:
poi_data[obj_json['business_id']]={'latitude':obj_json['latitude'],
'longitude':obj_json['longitude'],
'categories':obj_json['categories'].split(', ')}
else:
poi_data[obj_json['business_id']]={'latitude':obj_json['latitude'],
'longitude':obj_json['longitude'],
'categories':obj_json['categories']}
print(time.time()-start_time)
# In[3]:
areas=dict()
for city in cities:
areas[city]=areamanager.delimiter_area(city)
# In[4]:
cities_pid_in_area=dict()
start_time=time.time()
for city in cities:
area=areas[city]
pid_in_area=collections.defaultdict(bool)
for poi_id in poi_data:
if areamanager.poi_in_area(area,poi_data[poi_id]):
pid_in_area[poi_id]=True
cities_pid_in_area[city]=pid_in_area
print(time.time()-start_time)
# In[5]:
fuser=open("../../data/user.json")
user_friend = dict()
user_data = dict()
start_time=time.time()
for i, line in enumerate(fuser):
# json to dict
obj_json = json.loads(line)
# add to the data collection
user_friend[obj_json['user_id']]=obj_json['friends'].split(', ')
custom_obj = dict()
for key, value in obj_json.items():
if key not in ['friends','elite','name','user_id']:
custom_obj[key] = value
user_data[obj_json['user_id']] = custom_obj
print(time.time()-start_time)
# In[6]:
freview=open("../../data/review.json")
cities_checkin_data=dict()
for city in cities:
cities_checkin_data[city]=list()
start_time=time.time()
for i, line in enumerate(freview):
# json to dict
obj_json = json.loads(line)
for city in cities:
if cities_pid_in_area[city][obj_json['business_id']]:
# add to the data collection
cities_checkin_data[city].append({'user_id':obj_json['user_id'],
'poi_id':obj_json['business_id'],
'date':obj_json['date']})
break
if i % 500000 ==0:
print(i)
print(time.time()-start_time)
ftip=open("../../data/tip.json")
start_time=time.time()
for i, line in enumerate(ftip):
# json to dict
obj_json = json.loads(line)
for city in cities:
if cities_pid_in_area[city][obj_json['business_id']]:
# add to the data collection
cities_checkin_data[city].append({'user_id':obj_json['user_id'],
'poi_id':obj_json['business_id'],
'date':obj_json['date']})
break
if i % 500000 ==0:
print(i)
print(time.time()-start_time)
# In[ ]:
# df_checkin=pd.read_csv("../../data/checkin.csv")
# df_checkin=df_checkin.set_index("user_id")
# In[ ]:
# city_area=areamanager.delimiter_area('madison')
# df_checkin_city=areamanager.pois_in_area(city_area,df_checkin.reset_index())
# In[ ]:
# i=0
# for idx,checkin in df_checkin.iterrows():
# # print(checkin.business_id)
# if cities_pid_in_area['madison'][checkin.business_id]:
# i+=1
# i
# In[ ]:
# print(len(df_checkin_city['business_id'].drop_duplicates()))
# print(len(df_checkin_city['user_id'].drop_duplicates()))
# print(len(df_checkin_city))
# In[7]:
genoptions=['poi','neighbor','user','checkin','test','train'
,'user_data']
genoptions=['checkin',
'poi','neighbor',
'user','user_data'
]
# In[ ]:
for city in cities:
print("CITY: %s" % (city))
    # Get the check-ins for this city
checkin_data=cities_checkin_data[city]
print("checkin_data size: %d"%(len(checkin_data)))
    # convert to a dataframe
df_checkin=pd.DataFrame.from_dict(checkin_data)
df_checkin.head(1)
    # Start of the filtering step
df_diff_users_visited=df_checkin[['user_id','poi_id']].drop_duplicates().reset_index(drop=True).groupby('poi_id').count().reset_index().rename(columns={"user_id":"diffusersvisited"})
df_diff_users_visited=df_diff_users_visited[df_diff_users_visited['diffusersvisited']>=5]
del df_diff_users_visited['diffusersvisited']
df_checkin=pd.merge(df_checkin,df_diff_users_visited,on='poi_id',how='inner')
df_checkin['Count']=df_checkin.groupby(['user_id'])['user_id'].transform('count')
df_checkin=df_checkin[df_checkin['Count']>=20]
del df_checkin['Count']
    # convert to a dict, i.e. a list of dicts
checkin_data=list(df_checkin.to_dict('index').values())
    # end of the filtering step
    # collect all user ids
users_id = set()
for check in checkin_data:
users_id.add(check['user_id'])
    # number of users
user_num=len(users_id)
    # collect all POI ids
pois_id = set()
for check in checkin_data:
pois_id.add(check['poi_id'])
    # number of POIs
poi_num=len(pois_id)
print("user_num:%d, poi_num:%d"%(user_num,poi_num))
    # Start converting ids from string to integer
users_id_to_int = dict()
for i,user_id in enumerate(users_id):
users_id_to_int[user_id]=i
fuid=open('../../data/user/id/'+city+'.pickle','wb')
pickle.dump(users_id_to_int,fuid)
fuid.close()
pois_id_to_int = dict()
for i,poi_id in enumerate(pois_id):
pois_id_to_int[poi_id]=i
fpid=open('../../data/poi/id/'+city+'.pickle','wb')
pickle.dump(pois_id_to_int,fpid)
fpid.close()
    # Done converting ids from string to integer
    # build a dict of POI records ("objects") for this city
    # and apply the category filtering
city_poi_data=dict()
if 'poi' in genoptions:
for poi_id in pois_id:
city_poi_data[pois_id_to_int[poi_id]]=poi_data[poi_id].copy()
city_poi_data[pois_id_to_int[poi_id]] = {'categories':category_normalization(city_poi_data[pois_id_to_int[poi_id]]['categories'])}
fpoi=open('../../data/poi_full/'+city+'.pickle','wb')
pickle.dump(city_poi_data,fpoi)
fpoi.close()
city_poi_data=dict()
if 'poi' in genoptions:
for poi_id in pois_id:
city_poi_data[pois_id_to_int[poi_id]]=poi_data[poi_id].copy()
city_poi_data[pois_id_to_int[poi_id]]['categories']=category_filter(poi_data[poi_id]['categories'])
fpoi=open('../../data/poi/'+city+'.pickle','wb')
pickle.dump(city_poi_data,fpoi)
fpoi.close()
    # get the neighbors of each POI
    # print("Fetching neighbors...")
if 'neighbor' in genoptions:
poi_neighbors={}
pois_id=[pois_id_to_int[pid] for pid in pois_id]
pois_coos = np.array([(city_poi_data[pid]['latitude'],city_poi_data[pid]['longitude']) for pid in pois_id])*np.pi/180
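        # BallTree with the haversine metric expects coordinates in radians
        # (hence the *pi/180 above) and a query radius expressed as a fraction
        # of the Earth radius, which is why NEIGHBOR_DISTANCE (km) is divided
        # by earth_radius below.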
poi_coos_balltree = sklearn.neighbors.BallTree(pois_coos,metric="haversine")
poi_neighbors = {lid: list(poi_coos_balltree.query_radius([pois_coos[lid]],geocat_constants.NEIGHBOR_DISTANCE/earth_radius)[0]) for lid in pois_id}
# print(poi_neighbors)
# args=[(lid,) for lid in pois_id]
# def neighbors_searcher(poi_id):
# neighbors=list()
# for npoi_id in pois_id:
# if geo_utils.dist((city_poi_data[poi_id]['latitude'],city_poi_data[poi_id]['longitude']),(city_poi_data[npoi_id]['latitude'],city_poi_data[npoi_id]['longitude'])) <= geocat_constants.NEIGHBOR_DISTANCE:
# neighbors.append(npoi_id)
# return neighbors
# poi_neighbors = run_parallel(neighbors_searcher,args,chunksize=60)
# list to dict
# poi_neighbors = {i: poi_neighbors[i] for i in range(len(poi_neighbors))}
print("Terminou vizinhos...")
fneighbors=open('../../data/neighbor/'+city+'.pickle','wb')
pickle.dump(poi_neighbors,fneighbors)
fneighbors.close()
city_user_friend=dict()
countusf=0
print("Inicio Amigos...")
users_id = list(users_id)
if 'user' in genoptions:
for i in tqdm(range(len(users_id))):
user_id=users_id[i]
ucity_friends=list()
for friend_id in user_friend[user_id]:
try:
ucity_friends.append(users_id_to_int[friend_id])
countusf+=1
except:
pass
city_user_friend[users_id_to_int[user_id]]=ucity_friends
fuser=open('../../data/user/friend/'+city+'.pickle','wb')
pickle.dump(city_user_friend,fuser)
fuser.close()
print("Fim Amigos...")
print("Friends: %d"%(countusf))
city_user_data = dict()
if 'user_data' in genoptions:
for i in tqdm(range(len(users_id))):
user_id=users_id[i]
city_user_data[users_id_to_int[user_id]]=user_data[user_id].copy()
fuser=open('../../data/user/'+city+'.pickle','wb')
pickle.dump(city_user_data,fuser)
fuser.close()
if 'checkin' in genoptions:
for checkin in checkin_data:
checkin['user_id'] = users_id_to_int[checkin['user_id']]
checkin['poi_id'] = pois_id_to_int[checkin['poi_id']]
checkin['date'] = | pd.to_datetime(checkin['date']) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 07:39:40 2020
@author: adonay
"""
import os.path as op
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import utils_io as uio
import utils_signal_processing as sig_proc
import utils_feature_extraction as feat_ext
from mne.filter import filter_data
from sklearn.decomposition import PCA
import matplotlib
font = {'family' : 'normal',
'size' : 15}
matplotlib.rc('font', **font)
def zscore(x):
x = (x - np.nanmean(x))/ np.nanstd(x)
return x
def minmax_scaler(x):
xmin, xmax = np.min(x), np.max(x)
x = (x - xmin)/ (xmax - xmin)
return x
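# Hedged illustration (added): both helpers act elementwise on array-like input,
# e.g. zscore(np.array([1., 2., 3.])) -> approx. [-1.22, 0., 1.22] (NaN-aware via
# nanmean/nanstd) and minmax_scaler(np.array([1., 2., 3.])) -> [0., 0.5, 1.].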
def make_fig():
fig = plt.figure(figsize=(15, 20), constrained_layout=True)
gs = fig.add_gridspec(4, 4)
ax1 = fig.add_subplot(gs[0, :])
ax1.set_title('TS, peaks and pk slopes')
ax2 = fig.add_subplot(gs[1,:-2])
ax3 = fig.add_subplot(gs[2, :-2])
ax4 = fig.add_subplot(gs[3, 0])
ax5 = fig.add_subplot(gs[3, 1])
ax6 = fig.add_subplot(gs[1, 2:])
ax7 = fig.add_subplot(gs[2, 2:])
ax8 = fig.add_subplot(gs[3, 2:])
return fig, [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
def filename_creator(subj, folder_name):
diagn = df_beh.loc[subj, 'gen_diagnosis']
diag_num = df_beh.loc[subj, 'gen_diagnosis_num']
age = df_beh.loc[subj, 'age']
if diagn == "Control":
severity = 0
ttl = f"{n} {subj}, age {df_beh.loc[subj, 'age']} ,{diagn}"
elif diagn == "PD":
ttl = f"{n} {subj}, age {df_beh.loc[subj, 'age']} ,{diagn}, {df_beh.loc[subj, 'updrs_arm_total_R']}"
severity = df_beh.loc[subj, 'updrs_arm_total_R']
else:
ttl = f"{n} {subj}, age {df_beh.loc[subj, 'age']} ,{diagn}, {df_beh.loc[subj, 'common_arm_score_L']}"
severity = df_beh.loc[subj, 'common_arm_score_L']
fname = f"{folder_name}/{severity}_{diag_num}_{age}_{subj}.png"
return fname
# def save_plot(x, y, folder):
# Path definitions
root_dir = '/home/adonay/Desktop/projects/Ataxia'
model_name = '_resnet152_FingerTappingJan29shuffle1_650000'
paths = uio.get_paths(model_name, root_dir)
sfreq_common = 60
BP_filr = [1, 10]
# load data
df_beh = | pd.read_csv(paths['beh'], index_col=0) | pandas.read_csv |
import numpy as np
import pandas as pd
import pyprind
import os
from py_stringsimjoin.utils.generic_helper import \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables
def get_pairs_with_missing_value_disk(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
temp_dir, data_limit_per_core,
missing_pairs_file_name, l_out_attrs=None,
r_out_attrs=None, l_out_prefix='l_',
r_out_prefix='r_', out_sim_score=False,
show_progress=True):
# find column indices of key attr, join attr and output attrs in ltable
l_columns = list(ltable.columns.values)
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_columns = list(rtable.columns.values)
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# find ltable records with missing value in l_join_attr
ltable_missing = ltable[pd.isnull(ltable[l_join_attr])]
# find ltable records which do not contain missing value in l_join_attr
ltable_not_missing = ltable[pd.notnull(ltable[l_join_attr])]
# find rtable records with missing value in r_join_attr
rtable_missing = rtable[pd.isnull(rtable[r_join_attr])]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
print('Finding pairs with missing value...')
prog_bar = pyprind.ProgBar(len(ltable_missing) + len(rtable_missing))
# For each ltable record with missing value in l_join_attr,
# output a pair corresponding to every record in rtable.
for l_row in ltable_missing.itertuples(index=False):
for r_row in rtable.itertuples(index=False):
if has_output_attributes:
record = get_output_row_from_tables(
l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
                record = [l_row[l_key_attr_index], r_row[r_key_attr_index]]

            # Keep the output schema consistent with the second loop below: when a
            # similarity score column is requested, pairs produced here also get a
            # NaN score (assumed fix; the original appended no score in this loop).
            if out_sim_score:
                record.append(np.NaN)
            output_rows.append(record)
# Flushing the data onto the disk if in-memory size exceeds the permissible data limit
if len(output_rows) > data_limit_per_core:
df = pd.DataFrame(output_rows)
with open(missing_pairs_file_name, 'a+') as myfile:
df.to_csv(myfile, header=False, index=False)
output_rows = []
if show_progress:
prog_bar.update()
# if output rows have some data left, flush the same to the disk to maintain consistency.
if len(output_rows) > 0:
df = pd.DataFrame(output_rows)
with open(missing_pairs_file_name, 'a+') as myfile:
df.to_csv(myfile, header=False, index=False)
output_rows = []
# For each rtable record with missing value in r_join_attr,
# output a pair corresponding to every record in ltable which
# doesn't have a missing value in l_join_attr.
for r_row in rtable_missing.itertuples(index=False):
for l_row in ltable_not_missing.itertuples(index=False):
if has_output_attributes:
record = get_output_row_from_tables(
l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
record = [l_row[l_key_attr_index], r_row[r_key_attr_index]]
if out_sim_score:
record.append(np.NaN)
output_rows.append(record)
# Flushing the data onto the disk if in-memory size exceeds the permissible data limit
if len(output_rows) > data_limit_per_core:
df = pd.DataFrame(output_rows)
with open(missing_pairs_file_name, 'a+') as myfile:
df.to_csv(myfile, header=False, index=False)
output_rows = []
if show_progress:
prog_bar.update()
# if output rows have some data left, flush the same to the disk to maintain consistency.
if len(output_rows) > 0:
df = | pd.DataFrame(output_rows) | pandas.DataFrame |
import leidenalg
import graphtools
import sklearn
from igraph import Graph
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.spatial.distance import squareform
from scipy.cluster import hierarchy
class AffinityLeiden(sklearn.base.BaseEstimator, sklearn.base.ClusterMixin):
def __init__(
self,
knn=5,
knn_max=None,
knn_dist="euclidean",
n_pca=100,
decay=40,
n_landmark=2000,
resolution_parameter=1,
n_jobs=1,
verbose=True,
random_state=None,
):
self.knn = knn
self.knn_max = knn_max
self.knn_dist = knn_dist
self.decay = decay
self.n_pca = n_pca
self.n_jobs = n_jobs
self.n_landmark = n_landmark
self.resolution_parameter = resolution_parameter
self.random_state = random_state
self.verbose = verbose
def fit(self, X, y=None):
if isinstance(X, list):
X = np.array(X)
if X.ndim < 2:
raise ValueError("Cannot fit 1D array.")
if X.shape[0] == 1:
raise ValueError("Input contains only 1 sample.")
self.n_features_in_ = X.shape[1]
graph = graphtools.Graph(
X,
n_pca=self.n_pca,
n_landmark=self.n_landmark,
distance=self.knn_dist,
knn=self.knn,
knn_max=self.knn_max,
decay=self.decay,
thresh=1e-4,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
)
self.affinity_matrix_ = graph.diff_op.todense()
affinity_igraph = Graph().Weighted_Adjacency(
matrix=self.affinity_matrix_.tolist(), mode="undirected"
)
partition = leidenalg.find_partition(
affinity_igraph,
partition_type=leidenalg.RBConfigurationVertexPartition,
weights=affinity_igraph.es["weight"],
n_iterations=-1,
seed=self.random_state,
resolution_parameter=self.resolution_parameter,
)
self.labels_ = np.array(partition.membership)
self.q_ = partition.q
return self
@property
def singularities(self):
self._singularities = (
np.unique(self.labels_, return_counts=True)[1] == 1
).sum()
return self._singularities
@property
def n_clusters(self):
self._n_labels = len(np.unique(self.labels_))
return self._n_labels
@property
def mean_cluster_size(self):
self._mean_cluster_size = np.unique(self.labels_, return_counts=True)[1].mean()
return self._mean_cluster_size
def set_index(self, index):
assert len(index) == len(self.labels_)
self.index = index
return self
def adjusted_mutual_info_score(self, s):
return sklearn.metrics.adjusted_mutual_info_score(
self.labels_, s[self.index].values
)
def adjusted_rand_score(self, s):
return sklearn.metrics.adjusted_rand_score(self.labels_, s[self.index].values)
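# Hedged usage sketch (added for illustration; the parameter values below are
# assumptions, not recommendations from the original authors).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(60, 20))
    demo_model = AffinityLeiden(knn=5, n_pca=10, n_landmark=None,
                                resolution_parameter=1, verbose=False,
                                random_state=0)
    demo_labels = demo_model.fit_predict(X_demo)  # fit_predict comes from ClusterMixin
    print("clusters found:", demo_model.n_clusters)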
def consensus_matrix(
dfs, column="cluster", weights=None, combine_nt=False, nt_threshold=1
):
# check for identical indices
[ | pd.testing.assert_index_equal(dfs[0].index, df.index) | pandas.testing.assert_index_equal |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 09 16:48:04 2016
@author: rakhunzy
"""
import numpy as np
import pandas as pd
import sys
from matplotlib import pyplot as plt
# In[]
def middle_point(lst):
    return lst[len(lst)//2]  # integer division so this also works under Python 3
def point_index(contour, point):
return np.argwhere(np.all(contour == point,axis=1))[0,0]
def get_line(start, end):
"""Bresenham's Line Algorithm
Produces a list of tuples from start and end
>>> points1 = get_line((0, 0), (3, 4))
>>> points2 = get_line((3, 4), (0, 0))
>>> assert(set(points1) == set(points2))
>>> print points1
[(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]
>>> print points2
[(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)]
"""
# Setup initial conditions
x1 = int(start[0])
y1 = int(start[1])
x2 = int(end[0])
y2 = int(end[1])
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return np.array(points)
# In[]
def read_contours(fname):
content = []
for s in open(fname).readlines():
s = s.strip('\t\n').split('\t')
image_filename = s[0]
rect = np.array([np.float(i) for i in s[1:5]])
coordinates = np.array([np.float(i) for i in s[5:]])
        coordinates = np.reshape(coordinates, (len(coordinates)//2, 2))
content.append([image_filename, rect, coordinates,])
return content
# In[]
def get_contour_section(contour, index1, index2):
if index1 < index2:
return contour[index1:index2]
else:
return np.vstack((contour[index1:],contour[:index2]))
def make_dense_contour(contour):
contour = np.append(contour, [contour[0]], axis=0)
new_contour = []
for i in range(len(contour)-1):
if i == 0:
new_contour = get_line(contour[i],contour[i+1])
else:
new_contour = np.vstack((new_contour, get_line(contour[i],contour[i+1])))
return new_contour
def double_landmarks(contour, lm_indices):
primary_points = np.array(lm_indices)
primary_points = np.append(primary_points, primary_points[0])
new_points = []
for i in range(len(primary_points) - 1):
point = middle_point(get_contour_section(contour, primary_points[i], primary_points[i + 1]))
point_idx = point_index(contour, point)
new_points.append(point_idx)
old_points = np.array([lm_indices])
new_points = np.array([new_points])
result = np.vstack((old_points, new_points)).transpose().reshape((1,len(lm_indices)*2))
return np.ravel(result)
def contour_to_landmarks(contour):
contour_max = np.max(contour, axis=0)
contour_min = np.min(contour, axis=0)
point_top = middle_point(contour[np.where( contour[:,1] == contour_min[1])])
point_bottom = middle_point(contour[np.where( contour[:,1] == contour_max[1])])
# point_left = middle_point(contour[np.where( contour[:,0] == contour_min[0])])
# point_right = middle_point(contour[np.where( contour[:,0] == contour_max[0])])
lm_2 = [point_index(contour, p) for p in (point_top, point_bottom)]
lm_4 = double_landmarks(contour, lm_2)
#lm_4 = [point_index(contour, p) for p in (point_top, point_right, point_bottom, point_left)]
lm_8 = double_landmarks(contour, lm_4)
lm_16 = double_landmarks(contour, lm_8)
return contour[lm_16]
# x y width height
def bounding_box(iterable):
min_x, min_y = np.min(iterable, axis=0)
max_x, max_y = np.max(iterable, axis=0)
return np.array([min_x, min_y, max_x-min_x, max_y - min_y])
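# Hedged illustration (added): for a set of 2-D points, bounding_box returns
# [min_x, min_y, width, height], e.g.
#   bounding_box(np.array([[1, 2], [4, 6]]))  # -> array([1, 2, 3, 4])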
#rect = contours[0][1]
#contour = contours[0][2]
def line_to_landmark(rect, contour):
dcontour = make_dense_contour(contour)
landmarks = contour_to_landmarks(dcontour)
landmarks = np.array([[lm[0] + rect[0], lm[1] + rect[1]] for lm in landmarks])
bbox = bounding_box(landmarks)
flat_landmarks = np.ravel(landmarks.reshape((1,len(landmarks)*2)))
result = np.hstack((bbox, flat_landmarks))
return result
if False:
input_file_name = '../../data/train/1.pts'
# In[]
input_file_name = sys.argv[1]
output_file_name = input_file_name[:-4] + '_boxede16.lms'
contours = read_contours(input_file_name)
# In[]
df = | pd.DataFrame([c[0] for c in contours]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from pandas import compat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.indexing import is_list_like
from pandas.core.arrays.categorical import _factorize_from_iterable
class Smarties:
def __init__(self, main_lookup=None):
self.main_lookup=main_lookup
return None
def transform(self, df):
result_lookup = self.main_lookup
try:
df = df[result_lookup['normal']]
except:
list1 = result_lookup['normal']
list2 = df.columns
for i in list1:
if i in list2:
print('ok',i)
else:
print('missing!',i)
raise Exception('You are missing a column key, should be:' + str(result_lookup['normal']))
encoding_lookup = result_lookup['encoding']
with_dummies = [df.drop(encoding_lookup.keys(), axis=1)] #drop columns to encode
for key in encoding_lookup:
values = df[key].values
#Check to see if encoding took place
number_of_cols = len(encoding_lookup[key])
number_of_rows = df.shape[0]
dummy_mat = np.zeros((number_of_rows, number_of_cols), dtype=np.uint8)
for row in range(number_of_rows):
indices = [i for i, s in enumerate(encoding_lookup[key]) if key + '_' + str(values[row]) == s]
if len(indices) > 0:
dummy_mat[row][indices[0]] = 1
with_dummies.append(DataFrame(dummy_mat, index=df.index, columns=encoding_lookup[key]))
return pd.concat(with_dummies, axis=1)
def fit_transform(self, data, y=None, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
"""
#from pandas.core.reshape.concat import concat
from itertools import cycle
if 'DataFrame' not in str(type(data)): #convert series to dataframe
data = data.to_frame()
main_lookup={}
main_lookup['normal'] = data.columns
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if | is_list_like(item) | pandas.core.indexing.is_list_like |
#!/usr/bin/env python
# coding: utf-8
# ## Library imports
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
COLORS = sns.color_palette()
import chart_studio.plotly as py
import cufflinks as cf
print(cf.__version__)
cf.go_offline()
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
import plotly
plotly.offline.init_notebook_mode()
# ## Korean font support for plot visualization
# In[2]:
import matplotlib
from matplotlib import font_manager, rc
import platform
try :
if platform.system() == 'Windows':
        # On Windows
font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
else:
        # On Mac
rc('font', family='AppleGothic')
except :
pass
matplotlib.rcParams['axes.unicode_minus'] = False
# # <Dataset introduction>
# - Seoul public bicycle usage data (Oct 1 - Nov 30, 2019)
# ### EDA by age group
# - How should the data be viewed by age group?
# - Extract usage time, travel distance, and number of rides
# - Compare and analyze travel distance and ride counts relative to usage time
# - Should the exercise-amount column be dropped?
#
# #### Data loading and concat
# - Korean text gets garbled in public-data CSV files
# - It would be nice if public-data files were consistently encoded in utf-8, but most are encoded in cp949 or euc-kr
# - This Seoul public bicycle CSV file is encoded in cp949, so loading it as utf8 shows ???? characters
# - Converted to utf8 and reloaded
# In[4]:
df1 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(7).csv', encoding='utf-8')
df1 = df1.loc[458674:]
df2 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(8).csv', encoding='utf-8')
df3 = pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(9).csv', encoding='utf-8')
df4 = | pd.read_csv('/Users/wglee/Desktop/DATA ANALYSIS/데이터사이언스school/EDA프로젝트/EDA프로젝트데이터/서울특별시 공공자전거 이용정보(시간대별)_20190601_20191130(10).csv', encoding='utf-8') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 23:04:33 2019
When both neuroimaging data and a clinical/behavioral scale are given, and the scale covers more subjects than the imaging data, we need to extract the subset of scale entries that matches the imaging data.
@author: lenovo
"""
import sys
import os
cpwd = __file__
root = os.path.dirname(os.path.dirname(__file__))
sys.path.append(root)
print(f'##{root}')
import pandas as pd
import Utils.lc_copy_selected_file_V6 as copy
class screening_covariance_to_match_neuroimage():
def __init__(sel):
sel.folder = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\zDynamic\state\covariances\folder_MDD.xlsx'
sel.path_neuroimage = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\zDynamic\state\allState17_5\state5_all\state5\state5_MDD'
sel.cov_path = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\zDynamic\state\covariances\ageANDsex_MDD.xlsx'
sel.save_path = r'D:\WorkStation_2018\WorkStation_dynamicFC_V1\Data\zDynamic\state\allState17_5\state5_all\state5\cov'
sel.save_name = 'state5_cov_MDD.xlsx'
def fetch_folder(sel):
""" fetch sel.folder"""
sel_folder = copy.CopyFmri(
reference_file=sel.folder,
targe_file_folder=sel.path_neuroimage,
keywork_reference_for_uid='([1-9]\d*)',
ith_reference_for_uid=0,
keyword_targetfile_for_uid='([1-9]\d*)',
matching_pointnumber_in_backwards=1,
ith_targetfile_for_uid=0,
keyword_targetfile_not_for_uid='',
keyword_parentfolder_contain_targetfile='',
savePath=sel.save_path,
n_processess=2,
ifSaveLog=0,
ifCopy=0,
ifMove=0,
saveInToOneOrMoreFolder='saveToOneFolder',
saveNameSuffix='',
ifRun=0)
result = sel_folder.main_run()
uid = result.allSubjName
values = [int(v) for v in uid.values]
uid = | pd.DataFrame(values) | pandas.DataFrame |
'''
Created on Apr 3, 2020
@author: <NAME>, Blue Lightning Development, LLC
'''
import os
import pandas as pd
pathToRepository = 'C:/Users/NOOK/GITHUB/COVID-19' # change to where you checked out https://github.com/CSSEGISandData/COVID-19.git
states = ["Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
"Connecticut", "Delaware", "District of Columbia", "Florida", "Georgia", "Hawaii", "Idaho", "Illinois",
"Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
"Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana",
"Nebraska", "Nevada", "New Hampshire", "New Jersey", "New Mexico", "New York",
"North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon", "Pennsylvania", "Puerto Rico",
"Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah",
"Vermont", "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming"]
states2 = { # yes these are redundant; but one is sorted by name and the other by 2 letter
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
def updateHospitalizations(pathToRepository, pull=True):
if (pull) :
os.system('git -C %s pull' % pathToRepository)
f = pd.DataFrame(columns=states)
base = '%s/csse_covid_19_data/csse_covid_19_daily_reports_us/' % pathToRepository
for (__, __, filenames) in os.walk(base):
for name in filenames:
if (name.endswith('.csv')) :
print(name, pd.to_datetime(name[0:-4]))
daily = | pd.read_csv(base + name, encoding='utf8') | pandas.read_csv |
## By <NAME>
## Created 2018. Edited AS 2019. Edited AJ 2020
import sys
import argparse
import gzip
import pandas as pd
import os
import multiprocessing as mp
import re
from traceback import print_exc
def revComp(my_seq): ## obtain reverse complement of a sequence
base_comp = {'A':'T', 'C':'G','G':'C', 'T':'A', 'N':'N', " ":" "}
lms = list(my_seq[:-1]) ## parse string into list of components
lms.reverse()
try:
lms = [base_comp[base] for base in lms]
except TypeError:
pass
lms = ''.join(lms) ## get string from reversed and complemented list
return(lms)
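# Hedged illustration (added): revComp drops the last character of its input
# (it expects the trailing fastq newline), so e.g. revComp('AACG\n') -> 'CGTT'.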
def BC_scan(barcodes,start_scan,end_scan,seq): ## Given start and end position of scanning sequence, look for barcodes
str_list = [seq[i:i+16] for i in range(start_scan,end_scan)] ## barcode file has "\n" in encoding
bc_intersect = list(set(str_list).intersection(barcodes))
if bc_intersect:
bc_intersect = [element for element in bc_intersect] ## allow for possibility of multiple barcodes matching
bc_pos = [seq.index(element) for element in bc_intersect]
else:
bc_intersect = 'no_bc'
bc_pos='-'
return{'bc_pos':bc_pos,'bc_intersect':bc_intersect}
def TDetectFwd(seq): ## Looks for T9 in the forward strand
try:
fwd_ix = seq.index('TTTTTTTTT') ### Only detects the first occurrence of T9
return(fwd_ix)
except ValueError:
fwd_ix = -1
return(fwd_ix)
def TDetectRev(rev_seq): ## If it was actually the reverse strand then it looks at T9 in the reverse complement
try:
rev_ix = rev_seq.index('TTTTTTTTT')
return(rev_ix)
except ValueError:
rev_ix = -1
return(rev_ix)
def TSODetectFwd(end_seq): ## Looks for middle 15 bases of TSO in the forward strand
tso = 'TGGTATCAACGCAGA'
try:
fwdTSO_ix = len(end_seq) - end_seq.index(revComp(tso))
return(fwdTSO_ix)
except ValueError:
fwdTSO_ix = -1
return(fwdTSO_ix)
def TSODetectRev(revEnd_seq): ## If it was actually the reverse strand then it looks at TSO in the reverse complement
tso = 'AAGCAGTGGTATCAACGCAGAGTACAT'
try:
revTSO_ix = len(revEnd_seq) - revEnd_seq.index(revComp(tso))
return(revTSO_ix)
except ValueError:
revTSO_ix = -1
return(revTSO_ix)
def prelim(args):
global barcodes
global cluster_id
global umiLength
file_name = re.split('/|.fq.gz|.fastq.gz',args.fq)[-2]
print(file_name)
# If output file already exists, delete it.
if os.path.isfile('%sPolyT_BCDetection_%s_.csv' %(args.outDir, file_name)):
os.system('rm %sPolyT_BCDetection_%s_.csv' %(args.outDir,file_name))
# If secondary output file for reads that are too short already exists, delete it.
if os.path.isfile('%sTooShort.csv' %(args.outDir)):
os.system('rm %sTooShort.csv' %(args.outDir))
bc_file = args.bcClust.replace(u'\xa0', u'')
barcodes = [x.strip('\n').split('\t')[0] for x in open(bc_file).readlines()]
cluster_id = [x.strip('\n').split('\t')[1] for x in open(bc_file).readlines()]
if args.chemistry == "v2":
umiLength = 10
elif args.chemistry == "v3":
umiLength = 12
return()
def addToDict(d, line, rn):
seq = line[:-1][:200]
rev_seq = revComp(line)[:200]
end_seq = line[-200:-1]
revEnd_seq = revComp(line)[-200:]
d['Readname'].append(rn)
d['length'].append(len(line))
## Getting TSO stats for read
fwdTSO_ix = TSODetectFwd(end_seq)
revTSO_ix = TSODetectRev(revEnd_seq)
if fwdTSO_ix == revTSO_ix == -1:
d['tso_status'].append('-')
d['tso_position'].append('-')
elif fwdTSO_ix == -1 and revTSO_ix != -1:
d['tso_status'].append('TSO_found')
d['tso_position'].append([revTSO_ix])
elif fwdTSO_ix != -1 and revTSO_ix == -1:
d['tso_status'].append('TSO_found')
d['tso_position'].append([fwdTSO_ix])
elif fwdTSO_ix != -1 and revTSO_ix != -1:
d['tso_status'].append('DoubleTSO')
d['tso_position'].append('-')
## Getting T9 position
fwd_ix = TDetectFwd(seq)
rev_ix = TDetectRev(rev_seq)
if fwd_ix == rev_ix == -1: ## If valuError, output is -1, so no polyT found
d['T9_status'].append('poly_T_not_found')
d['position'].append('-')
d['bc_position'].append('-')
d['BarcodeFound'].append('no_bc')
d['Cluster'].append('no_clust')
d['Strand_info'].append('none')
d['UMIs'].append('-')
elif fwd_ix == -1 and rev_ix != -1: ## PolyT found in reverse complement only
d['T9_status'].append('poly_T_found')
d['position'].append([rev_ix])
d['Strand_info'].append('rev')
start_scan = rev_ix-36
end_scan = rev_ix-6
if start_scan >= 0 and end_scan > 0:
bc_found = BC_scan(barcodes,start_scan,end_scan,rev_seq)
elif start_scan < 0 and end_scan >0:
start_scan = 0
bc_found = BC_scan(barcodes,start_scan,end_scan,rev_seq)
elif start_scan <0 and end_scan <= 0:
bc_found = {'bc_pos':'-','bc_intersect':'no_bc'}
if bc_found:
d['BarcodeFound'].append(bc_found.get('bc_intersect'))
d['bc_position'].append(bc_found.get('bc_pos'))
if 'no_bc' in bc_found.get('bc_intersect'):
d['Cluster'].append('no_clust')
d['UMIs'].append('-')
else:
d['Cluster'].append([cluster_id[x] for x in [barcodes.index(item) for item in bc_found.get('bc_intersect')]])
UMI_start = int(bc_found.get('bc_pos')[0])+16
UMI_end = int(bc_found.get('bc_pos')[0])+16+umiLength
d['UMIs'].append(rev_seq[UMI_start:UMI_end])
#bc_count += 1
else:
d['BarcodeFound'].append('no_bc')
d['Cluster'].append('no_clust')
d['bc_position'].append('-')
d['UMIs'].append('-')
elif fwd_ix != -1 and rev_ix == -1: ## PolyT found in sequence but NOT the reverse complement
d['T9_status'].append('poly_T_found')
d['position'].append([fwd_ix])
d['Strand_info'].append('fwd')
start_scan = fwd_ix-36
end_scan = fwd_ix-16
if start_scan >= 0 and end_scan > 0:
bc_found = BC_scan(barcodes,start_scan,end_scan,seq)
elif start_scan < 0 and end_scan >0:
start_scan = 0
bc_found = BC_scan(barcodes,start_scan,end_scan,seq)
elif start_scan <0 and end_scan <= 0:
bc_found = {'bc_pos':'-','bc_intersect':'no_bc'}
else:
print("wtf",fwd_ix,rev_ix,start_scan,end_scan)
if bc_found:
d['BarcodeFound'].append(bc_found.get('bc_intersect'))
d['bc_position'].append(bc_found.get('bc_pos'))
if 'no_bc' in bc_found.get('bc_intersect'):
d['Cluster'].append('no_clust')
d['UMIs'].append('-')
else:
d['Cluster'].append([cluster_id[x] for x in [barcodes.index(item) for item in bc_found.get('bc_intersect')]])
UMI_start = int(bc_found.get('bc_pos')[0])+16
UMI_end = int(bc_found.get('bc_pos')[0])+16+umiLength
d['UMIs'].append(seq[UMI_start:UMI_end])
#bc_count += 1
else:
d['BarcodeFound'].append('no_bc')
d['Cluster'].append('no_clust')
d['bc_position'].append('-')
d['UMIs'].append('-')
else: ## PolyT found in both. Could mean one of three things
start_scan_f = fwd_ix-36
end_scan_f = fwd_ix-16
if start_scan_f >= 0 and end_scan_f > 0:
bc_found_f = BC_scan(barcodes,start_scan_f,end_scan_f,seq)
elif start_scan_f < 0 and end_scan_f >0:
start_scan_f = 0
bc_found_f = BC_scan(barcodes,start_scan_f,end_scan_f,seq)
elif start_scan_f <0 and end_scan_f <= 0:
bc_found_f = {'bc_pos':'-','bc_intersect':'no_bc'}
start_scan_r = rev_ix-36
end_scan_r = rev_ix-6
if start_scan_r >= 0 and end_scan_r > 0:
bc_found_r = BC_scan(barcodes,start_scan_r,end_scan_r,rev_seq)
elif start_scan_r < 0 and end_scan_r >0:
start_scan_r = 0
bc_found_r = BC_scan(barcodes,start_scan_r,end_scan_r,rev_seq)
elif start_scan_r <0 and end_scan_r <= 0:
bc_found_r = {'bc_pos':'-','bc_intersect':'no_bc'}
if bc_found_f and bc_found_r and bc_found_r.get('bc_intersect') != 'no_bc' and bc_found_f.get('bc_intersect') != 'no_bc':
## BC found in forward AND reverse strand implies something is wrong, discard the read
d['T9_status'].append('poly_T_found')
d['position'].append([fwd_ix,rev_ix])
d['BarcodeFound'].append('DoubleBC')
d['bc_position'].append('-')
d['Cluster'].append('no_clust')
d['Strand_info'].append('both')
d['UMIs'].append('-')
#chimera += 1
        # NOTE: assumed fix -- the original code referenced an undefined `bc_found`
        # inside these two branches and reused `bc_found_f` in the reverse-strand
        # branch; each branch now uses its own scan result, and the branch tests
        # check for an actual barcode hit instead of the always-truthy dicts.
        elif bc_found_f.get('bc_intersect') != 'no_bc' and bc_found_r.get('bc_intersect') == 'no_bc': ## Barcode found in fwd strand, reverse strand polyT was a false positive
            d['T9_status'].append('poly_T_found')
            d['position'].append([fwd_ix])
            d['Strand_info'].append('fwd')
            d['BarcodeFound'].append(bc_found_f.get('bc_intersect'))
            d['bc_position'].append(bc_found_f.get('bc_pos'))
            if 'no_bc' in bc_found_f.get('bc_intersect'):
                d['Cluster'].append('no_clust')
                d['UMIs'].append('-')
            else:
                d['Cluster'].append([cluster_id[x] for x in [barcodes.index(item) for item in bc_found_f.get('bc_intersect')]])
                UMI_start = int(bc_found_f.get('bc_pos')[0])+16
                UMI_end = int(bc_found_f.get('bc_pos')[0])+16+umiLength
                d['UMIs'].append(seq[UMI_start:UMI_end])
            #bc_count += 1
        elif bc_found_r.get('bc_intersect') != 'no_bc' and bc_found_f.get('bc_intersect') == 'no_bc': ## Barcode found in reverse strand, fwd T9 was a false positive
            d['T9_status'].append('poly_T_found')
            d['position'].append([rev_ix])
            d['Strand_info'].append('rev')
            d['BarcodeFound'].append(bc_found_r.get('bc_intersect'))
            d['bc_position'].append(bc_found_r.get('bc_pos'))
            if 'no_bc' in bc_found_r.get('bc_intersect'):
                d['Cluster'].append('no_clust')
                d['UMIs'].append('-')
            else:
                d['Cluster'].append([cluster_id[x] for x in [barcodes.index(item) for item in bc_found_r.get('bc_intersect')]])
                UMI_start = int(bc_found_r.get('bc_pos')[0])+16
                UMI_end = int(bc_found_r.get('bc_pos')[0])+16+umiLength
                d['UMIs'].append(rev_seq[UMI_start:UMI_end])
            #bc_count += 1
else:
d['T9_status'].append('poly_T_found')
d['position'].append('-')
d['Strand_info'].append('both')
d['bc_position'].append('-')
d['BarcodeFound'].append('no_bc')
d['Cluster'].append('no_clust')
d['UMIs'].append('-')
return(d)
def chunkAndProcess(args,d,tooShort):
step = 4 ## read lines in file with a step size of 4
startLine = 0
sim_processes = args.numProc
print("Number of simultaneous processes: ", sim_processes)
readCount = sum(1 for i in gzip.open(args.fq, 'rb')) // 4
# print("Number of reads in fastq file: ", readCount)
toFork = (readCount // sim_processes) + 1
# print("Number of reads per process: ", toFork) ## test
# Set childNo to 1 to give first child that childNo.
childNo = 1
while readCount >= 1:
isChild = os.fork()
if isChild == 0:
break
else:
if childNo == sim_processes:
break
else:
startLine = startLine + (toFork * 4)
readCount = readCount - toFork
childNo += 1
if isChild != 0:
os.waitpid(isChild, 0)
sys.exit()
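    # Only the forked children continue past this point; each child skips
    # `startLine` lines of the fastq and processes its own chunk of up to
    # `toFork` reads below.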
with gzip.open(args.fq,"rt",encoding ='utf-8') as f:
for _ in zip(range(startLine), f):
pass
for lineno, line in enumerate(f, start = startLine):
if lineno < startLine + (toFork * 4):
if lineno % step == 0:
rn = line[:-1]
if 'runid' in rn:
rn = line.split(' ')[0]
if lineno % step == 1 and len(line) >= 201:
d = addToDict(d, line, rn)
if lineno % step == 1 and len(line) < 201:
tooShort.append(rn)
else:
break
writeTooShort(args, tooShort, childNo)
df = pd.DataFrame(data=d)
df.to_csv('%stmp%d' %(args.outDir, childNo), sep = "\t",index=False, header=False)
return()
def writeTooShort(args, tooShort, childNo):
tmp_d = {'Name':tooShort}
tmp_df = | pd.DataFrame(data=tmp_d) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import unittest
import pandas as pd
import pandas.io.common
import biom
import skbio
import qiime2
from pandas.util.testing import assert_frame_equal, assert_series_equal
from q2_types.feature_table import BIOMV210Format
from q2_types.feature_data import (
TaxonomyFormat, HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
DNAFASTAFormat, DNAIterator, PairedDNAIterator,
PairedDNASequencesDirectoryFormat, AlignedDNAFASTAFormat,
DifferentialFormat, AlignedDNAIterator
)
from q2_types.feature_data._transformer import (
_taxonomy_formats_to_dataframe, _dataframe_to_tsv_taxonomy_format)
from qiime2.plugin.testing import TestPluginBase
# NOTE: these tests are fairly high-level and mainly test the transformer
# interfaces for the three taxonomy file formats. More in-depth testing for
# border cases, errors, etc. are in `TestTaxonomyFormatsToDataFrame` and
# `TestDataFrameToTSVTaxonomyFormat` below, which test the lower-level helper
# functions utilized by the transformers.
class TestTaxonomyFormatTransformers(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_taxonomy_format_to_dataframe_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_dataframe_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_series_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_taxonomy_format_to_series_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_series_equal(obs, exp)
def test_headerless_tsv_taxonomy_format_to_tsv_taxonomy_format(self):
exp = (
'Feature ID\tTaxon\tUnnamed Column 1\tUnnamed Column 2\n'
'seq1\tk__Foo; p__Bar\tsome\tanother\n'
'seq2\tk__Foo; p__Baz\tcolumn\tcolumn!\n'
)
_, obs = self.transform_format(
HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'headerless.tsv'))
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_tsv_taxonomy_format_to_dataframe(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_tsv_taxonomy_format_to_series(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TSVTaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_series_equal(obs, exp)
def test_dataframe_to_tsv_taxonomy_format(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Foo', 'Bar']
df = pd.DataFrame([['taxon1', '42', 'foo'], ['taxon2', '43', 'bar']],
index=index, columns=columns, dtype=object)
exp = (
'Feature ID\tTaxon\tFoo\tBar\n'
'seq1\ttaxon1\t42\tfoo\n'
'seq2\ttaxon2\t43\tbar\n'
)
transformer = self.get_transformer(pd.DataFrame, TSVTaxonomyFormat)
obs = transformer(df)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_series_to_tsv_taxonomy_format(self):
index = pd.Index(['emrakul', 'peanut'], name='Feature ID',
dtype=object)
series = pd.Series(['taxon1', 'taxon2'],
index=index, name='Taxon', dtype=object)
exp = (
'Feature ID\tTaxon\n'
'emrakul\ttaxon1\n'
'peanut\ttaxon2\n'
)
transformer = self.get_transformer(pd.Series, TSVTaxonomyFormat)
obs = transformer(series)
with obs.open() as fh:
self.assertEqual(fh.read(), exp)
def test_biom_table_to_tsv_taxonomy_format(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
obs = transformer(table)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_table_to_tsv_taxonomy_format_no_taxonomy_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxon=['a', 'b']) for _ in range(4)]
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(ValueError,
'O0 does not contain `taxonomy`'):
transformer(table)
def test_biom_table_to_tsv_taxonomy_format_missing_md(self):
filepath = self.get_data_path(
os.path.join('taxonomy',
'feature-table-with-taxonomy-metadata_v210.biom'))
table = biom.load_table(filepath)
observation_metadata = [dict(taxonomy=['a', 'b']) for _ in range(4)]
observation_metadata[2]['taxonomy'] = None # Wipe out one entry
table = biom.Table(table.matrix_data,
observation_ids=table.ids(axis='observation'),
sample_ids=table.ids(axis='sample'),
observation_metadata=observation_metadata)
transformer = self.get_transformer(biom.Table, TSVTaxonomyFormat)
with self.assertRaisesRegex(TypeError, 'problem preparing.*O2'):
transformer(table)
def test_biom_v210_format_to_tsv_taxonomy_format(self):
filename = os.path.join(
'taxonomy', 'feature-table-with-taxonomy-metadata_v210.biom')
_, obs = self.transform_format(BIOMV210Format, TSVTaxonomyFormat,
filename=filename)
self.assertIsInstance(obs, TSVTaxonomyFormat)
self.assertEqual(
obs.path.read_text(),
'Feature ID\tTaxon\nO0\ta; b\nO1\ta; b\nO2\ta; b\nO3\ta; b\n')
def test_biom_v210_format_no_md_to_tsv_taxonomy_format(self):
with self.assertRaisesRegex(TypeError, 'observation metadata'):
self.transform_format(
BIOMV210Format, TSVTaxonomyFormat,
filename=os.path.join('taxonomy', 'feature-table_v210.biom'))
def test_taxonomy_format_with_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp_df = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_taxonomy_format_without_header_to_metadata(self):
_, obs = self.transform_format(TaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'headerless.tsv'))
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp_df = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
exp = qiime2.Metadata(exp_df)
self.assertEqual(exp, obs)
def test_tsv_taxonomy_format_to_metadata(self):
_, obs = self.transform_format(TSVTaxonomyFormat, qiime2.Metadata,
os.path.join('taxonomy',
'3-column.tsv'))
index = | pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object) | pandas.Index |
"""
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
# don't tests a MultiIndex here (as its tested separated)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
# don't test a MultiIndex here (as its tested separated)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if | is_period_dtype(index.dtype) | pandas.core.dtypes.common.is_period_dtype |
# ********************************************************************************** #
# #
#   Project: FastClassAI workbench                                                    #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import pickle
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from src.utils.model_summary_plots import visual_model_summary
# from src.utils.method_comparison_tools import method_comparison_boxplot # copied here for any potential changes,
# Function ..........................................................................
def create_class_colors_dict(*,
list_of_unique_names,
cmap_name="tab20",
cmap_colors_from=0,
cmap_colors_to=1
):
'''Returns a dictionary that maps each class name in list_of_unique_names
to a distinct RGB color
. list_of_unique_names : list with unique, full names of classes, groups etc.
. cmap_name : standard mpl colormap name.
. cmap_colors_from, cmap_colors_to : values between 0 and 1,
used to select the range of colors in the cmap,
'''
# create cmap
mycmap = plt.cm.get_cmap(cmap_name, len(list_of_unique_names)*10000)
newcolors = mycmap(np.linspace(cmap_colors_from, cmap_colors_to, len(list_of_unique_names)))
class_color_dict = dict()
for i, un in enumerate(list_of_unique_names):
class_color_dict[un] = newcolors[i]
return class_color_dict
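# Usage sketch (illustrative only): the class names below are made-up placeholders,
# not values taken from any dataset used in this project.
def _example_create_class_colors_dict():
    example_classes = ["class_a", "class_b", "class_c"]
    color_dict = create_class_colors_dict(
        list_of_unique_names=example_classes,
        cmap_name="tab20",
    )
    # each value is an RGBA array that can be passed directly as a matplotlib color,
    # eg to build legend patches the same way the plotting functions below do
    legend_patches = [mpl.patches.Patch(color=color_dict[name], label=name)
                      for name in example_classes]
    return color_dict, legend_patches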
# Function .............................................................................
def load_summary_files(*,
dataset_name,
dataset_variants,
module_names,
ai_methods,
keywords,
path_results,
verbose=False
):
# ensure that you have proper data structures
if isinstance(dataset_variants, str):
dataset_variants = [dataset_variants]
else:
pass
if isinstance(module_names, str):
module_names = [module_names]
else:
pass
if isinstance(ai_methods, str):
ai_methods = [ai_methods]
else:
pass
if isinstance(keywords, str):
keywords = [keywords]
else:
pass
# collect names of files that will be loaded
file_counter=0
for ai_method in ai_methods:
for dataset_variant in dataset_variants:
for module_name in module_names:
if verbose==True:
print("Loading files for: ", ai_method, dataset_variant, module_name, "Found: ", end="")
else:
pass
# path
rpath = os.path.join(path_results, f"{ai_method}__{dataset_name}__{dataset_variant}")
os.chdir(rpath)
# find all files in rpath
files = []
for file in glob.glob("*"):
files.append(file)
# select all with keywords,
files_s = pd.Series(files)
for k in keywords:
files_s = files_s.loc[files_s.str.contains(k)]
files_l = files_s.values.tolist()
# info part 2,
if verbose==True:
print(len(files_s), "files")
else:
pass
# load files
if len(files_s)>0:
for file_name in files_l :
loaded_df = pd.read_csv(file_name)
loaded_df["file_name"]=[file_name]*loaded_df.shape[0]
loaded_df["path"]=[rpath]*loaded_df.shape[0]
if file_counter==0:
summary_df = loaded_df
file_counter += 1
else:
summary_df = pd.concat([summary_df, loaded_df], axis=0)
summary_df.reset_index(inplace=True, drop=True)
else:
pass
# info part 2,
if verbose==True:
print("----> Final Table has results for ", summary_df.shape[0], " models")
else:
pass
return summary_df
# Function .............................................................................
def create_new_df_feature(*, df, new_feature_name, old_features_list, return_full_df=True, verbose=False):
'''
create a new feature by concatenating corresponding cells
in a pd.DataFrame from any number of other selected features
old_features_list: str, or list, with name/s of features to be concatenated
return_full_df : bool, if True the entire df is returned
if False, returns a pd.Series only with the new feature
'''
if isinstance(old_features_list, str):
old_features_list = [old_features_list]
else:
pass
# check if all features are available
stop_the_function = False
for i, feature in enumerate(old_features_list):
try:
df.loc[:, feature]
except:
stop_the_function = True
if verbose==True:
print(f"ERROR: {feature} -- was not found in dataframe")
else:
pass
# concatenate values in each corresponding cell
if stop_the_function==True:
return None
else:
for i, feature in enumerate(old_features_list):
if i==0:
new_feature = df.loc[:, feature].values.tolist()
else:
another_new_feature = df.loc[:, feature].values.tolist()
new_feature = [f"{x}__{y}" for (x,y) in zip(new_feature, another_new_feature)]
if return_full_df==True:
df[new_feature_name] = new_feature
return df
else:
return pd.Series(new_feature)
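# Usage sketch (illustrative only): the column values below are placeholders,
# not entries from the real summary tables produced by this pipeline.
def _example_create_new_df_feature():
    demo_df = pd.DataFrame({
        "method": ["knn", "svm"],
        "method_variant": ["v1", "v2"],
    })
    # adds a 'full_method_name' column with values such as "knn__v1"
    return create_new_df_feature(
        df=demo_df,
        new_feature_name="full_method_name",
        old_features_list=["method", "method_variant"],
    )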
# Function .............................................................................
def simple_visual_model_summary(*,
model_idx_in_sorted_summary_df=0,
subset_collection_name,
batch_names_list,
summary_df,
class_labels_configs,
path_data,
path_results,
N_displayed_images ="all",
max_img_per_col = 15,
fontsize_scale= 1
):
'''
Temporary function used only with the FastClassAI pipeline; it loads raw images
from all batches in a given subset batch collection, eg batches 1 and 2 for test data,
then plots 3 figures
- 1st figure - images grouped by the class assigned by the model, with color boxes showing the true class
- 2nd/3rd figure - pie charts showing sensitivity and specificity
PS: I am working on a better version
'''
# ....
idx_in_df = model_idx_in_sorted_summary_df
# *** find names and models to load
# sort summary df
sorted_summary_df = summary_df.sort_values('model_acc_valid', ascending=False)
sorted_summary_df.reset_index(inplace=True, drop=True)
# get all variables,
method = sorted_summary_df.method.iloc[idx_in_df]
dataset_name = sorted_summary_df.dataset_name.iloc[idx_in_df]
dataset_variant = sorted_summary_df.dataset_variant.iloc[idx_in_df]
model_ID = sorted_summary_df.model_ID.iloc[idx_in_df] # it's an ID number given to the model in that dictionary,
# *** paths
path_to_raw_images_sorted_into_batches = os.path.join(path_data, f'{dataset_name}__{dataset_variant}')
path_to_batch_labels = os.path.join(path_data, f'{dataset_name}__{dataset_variant}__extracted_features')
path_to_model_predictions = os.path.join(path_results, f'{method}__{dataset_name}__{dataset_variant}')
# *** load data
# load model predictions,
os.chdir(path_to_model_predictions)
model_predictions_file_name = re.sub("summary_table.csv", "model_predictions_dict.p", sorted_summary_df.file_name.iloc[idx_in_df])
with open(model_predictions_file_name , 'rb') as file:
model_predictions_dict = pickle.load(file)
# get class_label colors,
class_labels_colors_toUse = class_labels_configs[dataset_variant]['class_labels_colors']
# calculate accuracy results
acc_results = f'acc={np.round(sorted_summary_df.loc[:, f"model_acc_{subset_collection_name}"].iloc[idx_in_df],2)}'
# display examples from best performing model,
visual_model_summary(
model_predictions_dict = model_predictions_dict,
model_ID = model_ID, # applicable only with a given model_predictions_dict
# what predicitons to display,
n = N_displayed_images, # use "all" to display all
examples_to_plot = "all", # correct and incorrect on the same plot,
class_colors = class_labels_colors_toUse,
# input data,
dataset_name = dataset_name,
subset_name = [subset_collection_name], # name used in xy_names eg: train, valid, test test_2
img_batch_subset_names = batch_names_list, # list, batch names that were placed in that collection,
path_to_raw_img_batch = path_to_raw_images_sorted_into_batches,
# ... settings for main plot,
title_prefix = f"{subset_collection_name}, {acc_results}",
make_plot_with_img_examples = True, # use False, to have only pie charts with classification summary
add_proba_values_to_img_name = True,
max_img_per_col = max_img_per_col,
# ... settings for annot. pie charts,
first_pie_title =f"Image Classification Results - True Class in pie chart ring\n{subset_collection_name} data",
second_pie_title =f"Class Detection Results - True Class in pie chart center \n{subset_collection_name} data",
pie_data_for_all_images_in_img_batch=True,
pie_charts_in_ne_row = 7,
# ... pie chart aesthetics added later to tune pie charts
PIE_legend_loc = "upper right",
PIE_ax_title_fonsize_scale=0.6*fontsize_scale,
PIE_legend_fontsize_scale=1.4*fontsize_scale,
PIE_wedges_fontsize_scale=1*fontsize_scale,
PIE_legend_ncol=4,
PIE_tight_lyout=False,
PIE_title_ha="right",
PIE_figsze_scale=1.5,
PIE_subplots_adjust_top=0.75,
PIE_ax_title_fontcolor="black"
)
# Function .............................................................................
def prepare_summary_df(*,
dataset_name,
dataset_variants,
module_names,
ai_methods,
keywords,
path_results,
verbose=False
):
'''
helper function that loads results from model evaluation,
for all combinations of dataset_name, dataset_variants, module_names, ai_methods
and keywords, that allow finding one or more csv file names (the order of keywords is not important)
it will provide only files with an exact match for all keywords; if nothing is returned, set verbose=True,
ai_methods, dataset_name, and dataset_variants are used to build folder names in path_results
whereas keywords and module names are used to find files
caution: the function load_summary_files requires module names for iteration, but these are not used to find files;
this is an error that will be removed. If you need results for a specific module, place its name in keywords,
and only files created for that module will be loaded
'''
summary_df = load_summary_files(
dataset_name = dataset_name,
dataset_variants = dataset_variants,
module_names = module_names,
ai_methods = ai_methods,
keywords = keywords,
path_results = path_results,
verbose = verbose
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_method_name",
old_features_list = ["method", "method_variant"],
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_dataset_variant",
old_features_list = ["dataset_variant", 'module'],
verbose=True
)
summary_df = create_new_df_feature(
df = summary_df,
new_feature_name = "full_results_group_name",
old_features_list = ["method", "method_variant", "dataset_variant", 'module'],
)
return summary_df
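# Usage sketch (illustrative only): the dataset, module, method names and the
# results path below are placeholders, not real folders from this project.
def _example_prepare_summary_df():
    return prepare_summary_df(
        dataset_name="example_dataset",
        dataset_variants=["variant_a"],
        module_names=["module_x"],
        ai_methods=["knn"],
        keywords=["summary_table.csv"],
        path_results="/path/to/results",
        verbose=True,
    )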
# Function ..............................................................................
def method_comparison_boxplot(*,
title="Accuracy of models created with each method\n",
data, # pd.DataFrame with the results,
figsize=(10,4),
# ...
col_with_results, # df colname with values to display, eg: test_accuracy ...
col_with_group_names, # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID, # df colname with values that will be grouped for separate boxes (must be unique)
col_with_group_colors, # df colname with values that will have different colors (colors can not be mixed within different group_IDs)
# ... colors
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=1,
# .. legend
legend__bbox_to_anchor=(0.9, 1.15),
subplots_adjust_top = 0.8,
legend_ncols=4,
# .. baseline
baseline_title="", # "most frequent baseline",
baseline_loc = -0.05,
baseline = 0.25,
top_results = 0.9, # green zone on a plot,
# ... fontsize
title_fontsize=20,
legend_fontsize=10,
xticks_fontsize=10,
yticks_fontsize=15,
axes_labels_fontsize=20,
# ... axies labels
xaxis_label = "Method",
yaxis_label = "Accuracy\n",
paint_xticks=False,
verbose=False
):
"""
Nice function to create NGS-like boxplots for comparing the accuracy of different model groups;
it is a more generic version of the function above,
"""
# ...............................................
# management
Stop_Function = False
# ...............................................
# data preparation - step.1 extraction
# ...............................................
# - extract unique values that will be searched,
unique_group_ID = data.loc[:,col_with_group_ID].unique().tolist()
unique_group_color_names = data.loc[:,col_with_group_colors].unique().tolist()
# - map colors onto color_groups_names
bx_color_dict = create_class_colors_dict(
list_of_unique_names=unique_group_color_names,
cmap_name=cmap,
cmap_colors_from=cmap_colors_from,
cmap_colors_to=cmap_colors_to
)
# - lists with data for boxes,
# one item for one box in each list
bx_data = []
bx_names = []
bx_colors = []
bx_id = []
bx_colors_dict_key = []
# - separate all boxes, and then find out what is the color and data associated with that box
for one_group_ID in unique_group_ID:
bx_id.append(one_group_ID)
# get the data and other columns for one box
data_df_for_one_box = data.loc[data.loc[:, col_with_group_ID]==one_group_ID,:]
# find out, data, name and color to display
# .... num. data ....
bx_data.append(data_df_for_one_box.loc[:,col_with_results].values) # np.array
# .... labels .....
one_bx_label = data_df_for_one_box.loc[:,col_with_group_names].unique().tolist()
if len(one_bx_label)==1:
bx_names.append(one_bx_label[0]) # np.array
else:
if verbose==1:
print(f"{one_group_ID} contains more than one group to display with different names !")
else:
Stop_Function = True
pass
# .... colors ....
one_box_color = data_df_for_one_box.loc[:,col_with_group_colors].map(bx_color_dict).iloc[0]
color_test_values = data_df_for_one_box.loc[:,col_with_group_colors].unique().tolist()
if len(color_test_values)==1:
bx_colors.append(one_box_color) # np.array
bx_colors_dict_key.append(color_test_values[0])
else:
if verbose==1:
print(f"{one_group_ID} contains more than one COLOR to display with different names !")
else:
Stop_Function = True
pass
# - check if everything is in order
if len(bx_colors)!=len(bx_names) or len(bx_names)!=len(bx_data):
if verbose==True:
print("Error: some data are missing or belong to different groups, and cannot be displayed as a coherent boxplot")
else:
pass
else:
# ...............................................
# data preparation - step.2 ordering
# ...............................................
# find medians and reorder
bx_medians = list()
for i, d in enumerate(bx_data):
bx_medians.append(np.median(d))
# ...
ordered_data_df = pd.DataFrame({
"bx_data": bx_data,
"bx_medians": bx_medians,
"bx_names": bx_names,
"bx_colors": bx_colors,
"bx_id": bx_id,
"bx_colors_dict_key":bx_colors_dict_key
})
ordered_data_df = ordered_data_df.sort_values("bx_medians", ascending=True)
ordered_data_df = ordered_data_df.reset_index(drop=True)
# ...............................................
# boxplot
# ...............................................
# ...............................................
# boxplot, - plt.boxplot(ordered_bx_data);
fig, ax = plt.subplots(figsize=figsize, facecolor="white")
fig.suptitle(title, fontsize=title_fontsize)
# add boxes,
bx = ax.boxplot(ordered_data_df["bx_data"],
showfliers=True, # remove outliers, because we are interested in a general trend,
vert=True, # boxes are vertical
labels=ordered_data_df["bx_names"], # x-ticks labels
patch_artist=True,
widths=0.3
)
ax.grid(ls="--")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_xticklabels(ordered_data_df["bx_names"], rotation=45, fontsize=xticks_fontsize, ha="right")
ax.set_yticks([0, .2, .4, .6, .8, 1])
ax.set_yticklabels(["0.0", "0.2", "0.4", "0.6", "0.8", "1.0"], fontsize=yticks_fontsize)
ax.set_ylabel(yaxis_label, fontsize=axes_labels_fontsize)
ax.set_xlabel(xaxis_label, fontsize=axes_labels_fontsize)
ax.set_ylim(0,1.02)
# add colors to each box individually,
for i, j in zip(range(len(bx['boxes'])),range(0, len(bx['caps']), 2)) :
median_color ="black"
box_color = bx_color_dict[ordered_data_df.loc[:,"bx_colors_dict_key"].iloc[i]]
# set properties of items with the same number as boxes,
plt.setp(bx['boxes'][i], color=box_color, facecolor=median_color, linewidth=2, alpha=0.8)
plt.setp(bx["medians"][i], color=median_color, linewidth=2)
plt.setp(bx["fliers"][i], markeredgecolor="black", marker=".") # outliers
# set properties of items with the 2x number of features as boxes,
plt.setp(bx['caps'][j], color=median_color)
plt.setp(bx['caps'][j+1], color=median_color)
plt.setp(bx['whiskers'][j], color=median_color)
plt.setp(bx['whiskers'][j+1], color=median_color)
# ...............................................
# set colors for xtick labels,
if paint_xticks==True:
for i, xtick in enumerate(ax.get_xticklabels()):
xtick.set_color(bx_color_dict[ordered_data_df["bx_colors_dict_key"].iloc[i]])
else:
pass
# ...............................................
# legend,
if ordered_data_df["bx_names"].shape[0]>0:
# create patch for each dataclass - adapted to an even larger number of classes than selected for example images,
patch_list_for_legend =[]
for i, m_name in enumerate(list(bx_color_dict.keys())):
label_text = f"{m_name}"
patch_list_for_legend.append(mpl.patches.Patch(color=bx_color_dict[m_name], label=label_text))
# add patches to plot,
fig.legend(
handles=patch_list_for_legend, frameon=False,
scatterpoints=1, ncol=legend_ncols,
bbox_to_anchor=legend__bbox_to_anchor, fontsize=legend_fontsize)
# ...............................................
# create space for the legend
fig.subplots_adjust(top=subplots_adjust_top)
# ...............................................
# ...............................................
# add line with baseline
ax.axhline(baseline, lw=2, ls="--", color="dimgrey")
ax.text(ordered_data_df.shape[0]+0.4, baseline+baseline_loc, baseline_title, ha="right", color="dimgrey", fontsize=yticks_fontsize)
# ...............................................
# color patches behind boxplots,
patch_width = 1 # ie. 1 = grey patch for 1 and 1 break
patch_color = "lightgrey"
pathces_starting_x = list(range(0, ordered_data_df.shape[0], patch_width*2))
# ...
for i, sx in enumerate(pathces_starting_x):
rect = plt.Rectangle((sx+0.5, 0), patch_width, 1000, color=patch_color, alpha=0.2, edgecolor=None)
ax.add_patch(rect)
# color patches for styling the accuracy,
rect = plt.Rectangle((0,0), ordered_data_df.shape[0]*100, baseline, color="red", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
rect = plt.Rectangle((0,baseline), ordered_data_df.shape[0]*100, top_results-baseline, color="orange", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
rect = plt.Rectangle((0, top_results), ordered_data_df.shape[0]*100, 10, color="forestgreen", alpha=0.1, edgecolor=None)
ax.add_patch(rect)
return fig
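# Usage sketch (illustrative only): assumes summary_df is the table returned by
# prepare_summary_df above, which already contains the listed columns.
def _example_method_comparison_boxplot(summary_df):
    return method_comparison_boxplot(
        data=summary_df,
        col_with_results="model_acc_valid",
        col_with_group_names="full_method_name",
        col_with_group_ID="full_results_group_name",
        col_with_group_colors="full_dataset_variant",
        baseline=0.5,
    )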
# Function .............................................................................
def create_boxplot_with_color_classes(*,
summary_df,
figsize = (10,6),
col_with_results ="model_acc_valid", # df colname with values to display, eg: test_accuracy ...
col_with_group_names ="full_method_name", # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID ="full_results_group_name", # df colname with values that will be grouped for separate boxes (must be unieque)
col_with_group_colors="full_dataset_variant", # df colname with values that will have different colors (colors can not be mixed within diffeent group_ID)
baseline = 0.5,
fontsize_scale=1,
subplots_adjust_top = 0.6,
baseline_title ="baseline",
legend_ncols=1,
legend__bbox_to_anchor=(0.5, 1.1),
):
'''
Function returns a boxplot showing accuracy or another metric displayed for any number
of groups of results (eg methods), divided into larger groups shown as different colors of boxes,
with a color legend above the plot
summary_df : summary dataframe created with the prepare_summary_df function,
col_with_results : str, df colname with values to display, eg: test_accuracy ...
col_with_group_names : str, df colname with values that will be displayed as
names of each box (these do not have to be unique)
col_with_group_ID : str, df colname with values that will be grouped for
separate boxes (must be unique)
col_with_group_colors: str, df colname with values that will have different colors
(colors can not be mixed within different group_IDs)
'''
# boxplot
fig = method_comparison_boxplot(
title=f"Accuracy of models created with each method\n\n",
data = summary_df, # pd.DataFrame with the results,
figsize=figsize,
# ...
col_with_results =col_with_results, # df colname with values to display, eg: test_accuracy ...
col_with_group_names =col_with_group_names , # df colname with values that will be displayed as names of each box (these do not have to be unique)
col_with_group_ID =col_with_group_ID, # df colname with values that will be grouped for separate boxes (must be unieque)
col_with_group_colors=col_with_group_colors, # df colname with values that will have different colors (colors can not be mixed within diffeent group_ID)
# ... colors
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=0.5,
# .. legend
legend__bbox_to_anchor=(0.5, 1.1),
subplots_adjust_top = subplots_adjust_top,
legend_ncols=legend_ncols,
# .. baseline
baseline_title =baseline_title,
baseline_loc =-0.09,
baseline = baseline,
top_results = 0.9, # green zone on a plot,
# ... fontsize
title_fontsize=20*fontsize_scale,
legend_fontsize=10*fontsize_scale,
xticks_fontsize=10*fontsize_scale,
yticks_fontsize=15*fontsize_scale,
axes_labels_fontsize=15*fontsize_scale,
# ... axies labels
xaxis_label = "Method",
yaxis_label = "Accuracy\n",
paint_xticks=True
)
return fig
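# Usage sketch (illustrative only): summary_df is the table returned by
# prepare_summary_df; the baseline value below is a placeholder.
def _example_create_boxplot_with_color_classes(summary_df):
    return create_boxplot_with_color_classes(
        summary_df=summary_df,
        col_with_results="model_acc_valid",
        baseline=0.5,
        fontsize_scale=1,
    )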
# Function .............................................................................
def preapre_table_with_n_best_results_in_each_group(*,
summary_df,
n_top_methods = 1,
sort_by = "model_acc_valid",
feature_used_to_group_models = "full_results_group_name",
display_table=False
):
'''
Function that takes the summary df, selects at most n of the best performing models per group,
and returns them all in sorted summary df table format,
if display_table==True, displays selected columns from that table to show all examples,
'''
# unique model group names
method_full_name_list = summary_df.loc[:, feature_used_to_group_models].unique().tolist()
# collect top methods,
for i, method_full_name in enumerate(method_full_name_list):
# . subset summary_df
summary_df_subset = summary_df.loc[summary_df.loc[:, feature_used_to_group_models]==method_full_name, :]
summary_df_subset = summary_df_subset.sort_values(sort_by, ascending=False)
# . place in
if i==0:
best_methods_summary_df = summary_df_subset.iloc[0:n_top_methods,:]
else:
best_methods_summary_df = pd.concat([best_methods_summary_df, summary_df_subset.iloc[0:n_top_methods,:]])
best_methods_summary_df.reset_index(drop=True, inplace=True)
# display examples:
# show best model examples
features_to_display = ["dataset_variant", "module","method", "method_variant",
"model_acc_train", "model_acc_valid", "model_acc_test",
"pca_components_used", "run_name"]
sorted_best_methods_summary_df = best_methods_summary_df.sort_values("model_acc_valid", ascending=False)
sorted_best_methods_summary_df.reset_index(drop=True, inplace=True)
if display_table==True:
features_to_display = ["dataset_variant", "module","method", "method_variant",
"model_acc_train", "model_acc_valid", "model_acc_test", "baseline_acc_test",
"pca_components_used", "run_name"]
display(sorted_best_methods_summary_df.loc[:, features_to_display])
else:
pass
return sorted_best_methods_summary_df
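# Usage sketch (illustrative only): keeps the 2 best models per results group,
# ranked by validation accuracy; summary_df comes from prepare_summary_df above.
def _example_best_results_table(summary_df):
    return preapre_table_with_n_best_results_in_each_group(
        summary_df=summary_df,
        n_top_methods=2,
        sort_by="model_acc_valid",
        feature_used_to_group_models="full_results_group_name",
        display_table=False,
    )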
# Function .............................................................................
def model_summary_plot(*,
# input data
df,
y,
boxname,
boxcolor,
scatterpoints,
baseline,
# fig, general settings,
title=None ,
figsize=(30,15) ,
# box colors
boxcolor_dict = None,
cmap="tab10",
cmap_colors_from=0,
cmap_colors_to=0.5,
# axes
xaxis_label = None,
yaxis_label = None, # if None == ydata_colname
grid_dct=dict(lw=1),
# scatterpoints,
full_model_marker ="*",
full_model_markersize=60,
full_model_markercolor="black",
# legend
add_legend=True,
subplots_adjust_top = 0.7,
legend_title=None,
legend__bbox_to_anchor=(0.4, 0.9),
legend_ncols=1,
# baseline
baseline_title = "baseline",
baseline_loc =-0.09,
use_fixed_baselines = True,
baseline_limit_list = [0.5, 0.9, 1.5], # the last one
baseline_color_list = ["red", "orange", "forestgreen"],
# fontsizes
fontsize_scale =1,
title_fontsize =30,
legend_fontsize=20,
xticks_fontsize=20,
yticks_fontsize=20,
axes_labels_fontsize=25,
):
'''
NGS-like boxplot for displaying accuracy, or other results obtained with large number of models
# input data
df : pd.DataFrame
y : str, or list with values, df colname with values to display, eg: test_accuracy ...
boxname : str, or list with values, df colname with values that will be displayed as names of each box, if None,
(these do not have to be unique, because box colors are also informative,
and you may use shorter names to make the plot nicer)
boxcolor : str, or list with values, if None, all boxes will have the same color, and no legend is displayed,
scatterpoints : list, with True/False values, data points in each group used as scatter points,
not as part of the boxplot; if None, none will be used,
baseline : str, or list with values, df colname with values for the baseline that will be displayed in the background,
# horizontal patches
use_fixed_baselines : bool, if True, three horizontal patches of the same height will be added to the plot,
baseline_limit_list : list with 3 floats, eg: [0.5, 0.9, 1.5], each float is the upper limit of the horizontal patch,
starting from the plot bottom
'''
# setup
assert type(df)==pd.DataFrame, "error: df is not pandas DataFrame"
# . set plot x/y labels,
if xaxis_label is None:
if isinstance(boxname, str):
xaxis_label=boxname
else:
xaxis_label="method"
if yaxis_label is None:
if isinstance(y, str):
yaxis_label=y
else:
yaxis_label="y"
# . fontsizes
title_fontsize = title_fontsize*fontsize_scale
legend_fontsize = legend_fontsize*fontsize_scale
xticks_fontsize = xticks_fontsize*fontsize_scale
yticks_fontsize = yticks_fontsize*fontsize_scale
axes_labels_fontsize = axes_labels_fontsize*fontsize_scale
# data preparation
# . extract columns, as list
if isinstance(y , str):
y = df.loc[:, y].values.tolist()
else:
pass
if isinstance(boxname , str):
boxname = df.loc[:, boxname].values.tolist()
else:
pass
#. optional values,
if boxcolor is not None:
if isinstance(boxcolor , str):
boxcolor = df.loc[:, boxcolor].values.tolist()
else:
pass
else:
boxcolor = ["method"]*len(y)
if baseline is not None:
if isinstance(baseline , str):
baseline = df.loc[:, baseline].values.tolist()
else:
pass
else:
baseline = [0.5]*len(y)
if scatterpoints is not None:
if isinstance(scatterpoints , str):
scatterpoints = df.loc[:, scatterpoints].values.tolist()
else:
pass
else:
scatterpoints = [False]*len(y) # ie, no data will be plotted as scatter points,
# . create unique boxnames with colors and method names,
if boxcolor is not None:
boxname_full = [f"{x}__{y}" for (x,y) in zip (boxname, boxcolor)] # used to search values,
else:
boxname_full = boxname
# assign colors to each boxcolor name
# . define colors for each class in boxcolor
if boxcolor_dict is None:
boxcolor_dict = create_class_colors_dict(
list_of_unique_names = pd.Series(boxcolor).unique().tolist(),
cmap_name = cmap,
cmap_colors_from = cmap_colors_from,
cmap_colors_to = cmap_colors_to
)
else:
pass
# . map colors onto boxcolor, that are names
boxcolor_value = pd.Series(boxcolor).map(boxcolor_dict)
# build pandas df wiht all data
boxplotdf = pd.DataFrame({
"y": y, # value on y-axis
"boxname_full": boxname_full, # used to separate each box (combines x-axis name and color)
"boxcolor_value": boxcolor_value, # color for boxplot,
"boxname":boxname, # displayed on x axis,
"boxcolor":boxcolor, # displayed on legend,
"baseline": baseline, # displayed as background value,
"scatterpoints": scatterpoints, # if True, the point is plotted as a scatter point,
})
# data preparation - part 2 - prepare array and ncols for plot
# . lists with data for boxes,
# one item for one box in each list
x_axis_name = [] # xtick labels
x_axis_color = [] # xtick label color
bx_x = []
bx_y = []
bx_color = [] # box color, (only for boxes)
sc_y = []
sc_x = []
baseline_x = []
baseline_y = []
median_y = []
# . fill in values, in proper order with positions on x axis,
for i, one_boxname_full in enumerate(pd.Series(boxname_full).unique().tolist()):
# find data for boxes
boxplotdf_bx_subset = boxplotdf.loc[(boxplotdf.boxname_full==one_boxname_full) & (boxplotdf.scatterpoints==False), :]
if boxplotdf_bx_subset.shape[0]>0:
bx_x.append(i)
bx_y.append(boxplotdf_bx_subset.loc[:,"y"].values.tolist())
bx_color.append(boxplotdf_bx_subset.boxcolor_value.iloc[0])
else:
pass
# find data for scatter points,
boxplotdf_sc_subset = boxplotdf.loc[(boxplotdf.boxname_full==one_boxname_full) & (boxplotdf.scatterpoints==True), :]
sc_values = boxplotdf_sc_subset.loc[:,"y"].values.tolist()
if len(sc_values)>0:
sc_x.extend([i]*len(sc_values))
sc_y.extend(sc_values)
else:
pass
# axis_name, baseline,
boxplotdf_group_subset = boxplotdf.loc[boxplotdf.boxname_full==one_boxname_full, :]
baseline_x.append(i)
baseline_y.append(boxplotdf_group_subset.baseline.max())
median_y.append(boxplotdf_group_subset.y.median())
x_axis_name.append(boxplotdf_group_subset.boxname.iloc[0])
x_axis_color.append(boxplotdf_group_subset.boxcolor_value.iloc[0])
# order items on x axis,
# . dict with key == old position, value == new position
'''
I am using a dict, because each item may have a different number of
elements, and they are not in order (ie one category may be missing from sc or bx),
which is completely normal !
'''
x_order = dict(zip(pd.Series(median_y).sort_values().index.values.tolist(), list(range(len(median_y)))))
bx_x = pd.Series(bx_x).map(x_order).values.tolist()
sc_x = pd.Series(sc_x).map(x_order).values.tolist()
baseline_x = pd.Series(baseline_x).map(x_order).values.tolist()
# . create ordered_xticks_labels
tempdf = pd.concat([pd.Series(median_y), pd.Series(x_axis_color), pd.Series(x_axis_name), pd.Series(bx_color), pd.Series(bx_y)], axis=1)
tempdf.columns=["median", "x_axis_color","x_axis_name", "bx_color", "by"]
tempdf = tempdf.sort_values("median")
tempdf.reset_index(drop=True, inplace=True)
ordered_xticks_labels = tempdf.x_axis_name.values.tolist()
ordered_xticks_colors = tempdf.x_axis_color.values.tolist()
ordered_bx_color = tempdf.bx_color.dropna().values.tolist()
ordered_b = tempdf.by.dropna().values.tolist()
# . add small Gaussian noise to sc_x positions,
sc_x = (np.random.normal(loc=0, scale=0.05, size=len(sc_x))+np.array(sc_x)).tolist()
# figure
fig, ax = plt.subplots(figsize=figsize, facecolor="white")
if title is not None:
fig.suptitle(title, fontsize=title_fontsize)
else:
pass
# boxplots
bx = ax.boxplot(
bx_y,
positions=bx_x,
showfliers=True, # remove outliers, because we are interested in a general trend,
vert=True, # boxes are vertical
patch_artist=True,
widths=0.3
)
# . add colors to each box individually,
for i, j in zip(range(len(bx['boxes'])),range(0, len(bx['caps']), 2)) :
median_color ="black"
box_color = bx_color[i]
# set properties of items with the same number as boxes,
plt.setp(bx['boxes'][i], color=box_color, facecolor=median_color, linewidth=2, alpha=0.8)
plt.setp(bx["medians"][i], color=median_color, linewidth=2)
plt.setp(bx["fliers"][i], markeredgecolor="black", marker=".") # outliers
# set properties of items with the 2x number of features as boxes,
plt.setp(bx['caps'][j], color=median_color)
plt.setp(bx['caps'][j+1], color=median_color)
plt.setp(bx['whiskers'][j], color=median_color)
plt.setp(bx['whiskers'][j+1], color=median_color)
# points,
if pd.Series(scatterpoints)
import gc
import time
from datetime import datetime
from functools import partial
from heamylab import mini_sample
import pandas as pd
import numpy as np
# import lightgbm as lgb
# from lightgbm.plotting import plot_importance, plot_metric, plot_tree, create_tree_digraph
import xgboost as xgb
from sklearn import metrics
from xgboost import XGBClassifier
from xgboost.plotting import plot_importance, plot_tree, to_graphviz
from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.preprocessing import LabelEncoder, StandardScaler, Normalizer
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold, learning_curve, ShuffleSplit
from sklearn.model_selection import train_test_split # 训练集数据拆分
from sklearn.metrics import (roc_curve, auc, roc_auc_score, accuracy_score, precision_recall_fscore_support,
classification_report) # 模型评估
from sklearn.ensemble import (GradientBoostingClassifier, VotingClassifier,
BaggingClassifier, BaggingRegressor, RandomForestClassifier)
from sklearn.externals import joblib
from imblearn.over_sampling import SMOTE
import matplotlib.pyplot as plt
def load_data_source(trainf, testf=None):
return pd.read_csv(trainf, sep=","), (pd.read_csv(testf, sep=",") if testf else None)
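# Usage sketch (illustrative only): the csv file names below are placeholders for
# whatever train/test files the experiment actually uses.
def _example_load_data_source():
    train_df, test_df = load_data_source("train.csv", "test.csv")
    return train_df.shape, test_df.shape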
"""
Preprocess sites data.
<NAME>
February 2022
"""
import sys
import os
import configparser
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.ops import transform
from shapely.geometry import shape, Point, mapping, LineString, MultiPolygon
from tqdm import tqdm
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def run_site_processing(iso3, level):
"""
Meta function for running site processing at GID 1 level.
"""
create_national_sites_csv(iso3)
create_national_sites_shp(iso3)
process_country_shapes(iso3)
process_regions(iso3, level)
create_regional_sites_layer_gid_1(iso3, level)
tech_specific_sites_gid_1(iso3, level)
if str(level) == "2":
create_regional_sites_layer_gid_2(iso3, level)
tech_specific_sites_gid_2(iso3, level)
return
def create_national_sites_csv(iso3):
"""
Create a national sites csv layer for a selected country.
"""
filename = '{}.csv'.format(iso3)
folder = os.path.join(DATA_PROCESSED, iso3, 'sites')
path_csv = os.path.join(folder, filename)
### Produce national sites data layers
if not os.path.exists(path_csv):
print('site.csv data does not exist')
print('Subsetting site data for {}'.format(iso3))
if not os.path.exists(folder):
os.makedirs(folder)
filename = "mobile_codes.csv"
path = os.path.join(DATA_RAW, filename)
mobile_codes = pd.read_csv(path)
mobile_codes = mobile_codes[['iso3', 'mcc']].drop_duplicates()
subset = mobile_codes[mobile_codes['iso3'] == iso3]
mcc = subset['mcc'].values[0]
filename = "cell_towers.csv"
path = os.path.join(DATA_RAW, filename)
output = []
chunksize = 10 ** 6
for idx, chunk in enumerate(pd.read_csv(path, chunksize=chunksize)):
country_data = chunk.loc[chunk['mcc'] == mcc]
country_data = country_data.to_dict('records')
output = output + country_data
if len(output) == 0:
print('{} had no data'.format(iso3))
return
output = pd.DataFrame(output)
from datetime import datetime
from dateutil.tz import tzlocal
import pytest
from pandas.compat import IS64
from pandas import (
DateOffset,
DatetimeIndex,
Index,
Series,
bdate_range,
date_range,
)
import pandas._testing as tm
from pandas.tseries.offsets import (
BDay,
Day,
Hour,
)
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps:
def test_ops_properties_basic(self, datetime_series):
# sanity check that the behavior didn't change
# GH#7206
for op in ["year", "day", "second", "weekday"]:
msg = f"'Series' object has no attribute '{op}'"
with pytest.raises(AttributeError, match=msg):
getattr(datetime_series, op)
# attribute access should still work!
s = Series({"year": 2000, "month": 1, "day": 10})
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
@pytest.mark.parametrize(
"freq,expected",
[
("A", "day"),
("Q", "day"),
("M", "day"),
("D", "day"),
("H", "hour"),
("T", "minute"),
("S", "second"),
("L", "millisecond"),
("U", "microsecond"),
],
)
def test_resolution(self, request, tz_naive_fixture, freq, expected):
tz = tz_naive_fixture
if freq == "A" and not IS64 and isinstance(tz, tzlocal):
request.node.add_marker(
pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")
)
idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_infer_freq(self, freq_sample):
# GH 11018
idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)
result = DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq_sample
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx._data.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, DateOffset)
# can reset to None
idx._data.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx._data.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx._data.freq = "foo"
def test_freq_view_safe(self):
# Setting the freq for one DatetimeIndex shouldn't alter the freq
# for another that views the same data
dti = date_range("2016-01-01", periods=5)
dta = dti._data
dti2 = DatetimeIndex(dta)._with_freq(None)
assert dti2.freq is None
# Original was not altered
assert dti.freq == "D"
assert dta.freq == "D"
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq="C")
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
import logging
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
logger = logging.getLogger(__name__)
def evaluate_agents(agent_manager_list,
n_simulations=5,
fignum=None,
show=True,
plot=True,
sns_kwargs=None):
"""
Evaluate and compare each of the agents in agent_manager_list.
Parameters
----------
agent_manager_list : list of AgentManager objects.
n_simulations: int
Number of calls to the eval() method of each AgentManager instance.
fignum: string or int
Identifier of plot figure.
show: bool
If true, calls plt.show().
plot: bool
If false, do not plot.
sns_kwargs:
Extra parameters for sns.boxplot
Returns
-------
dataframe with the evaluation results.
"""
sns_kwargs = sns_kwargs or {}
#
# evaluation
#
eval_outputs = []
for agent_manager in agent_manager_list:
logger.info(f'Evaluating {agent_manager.agent_name}...')
outputs = agent_manager.eval_agents(n_simulations)
if len(outputs) > 0:
eval_outputs.append(outputs)
if len(eval_outputs) == 0:
logger.error('[evaluate_agents]: No evaluation data. Make sure AgentManager.fit() has been called.')
return
#
# plot
#
# build unique agent IDs (in case there are two agents with the same ID)
unique_ids = []
id_count = {}
for agent_manager in agent_manager_list:
name = agent_manager.agent_name
if name not in id_count:
id_count[name] = 1
else:
id_count[name] += 1
unique_ids.append(name + "*" * (id_count[name] - 1))
# convert output to DataFrame
data = {}
for agent_id, out in zip(unique_ids, eval_outputs):
data[agent_id] = out
output = pd.DataFrame(data)
# plot
if plot:
plt.figure(fignum)
with sns.axes_style("whitegrid"):
ax = sns.boxplot(data=output, **sns_kwargs)
ax.set_xlabel("agent")
ax.set_ylabel("evaluation output")
if show:
plt.show()
return output
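# Usage sketch (illustrative only): `managers` stands for a list of already fitted
# AgentManager instances; the simulation count below is a placeholder.
def _example_evaluate_agents(managers):
    # returns a pd.DataFrame with one column of evaluation outputs per manager
    return evaluate_agents(
        agent_manager_list=managers,
        n_simulations=10,
        show=False,
        plot=False,
    )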
def plot_writer_data(agent_manager,
tag,
fignum=None,
show=True,
preprocess_func=None,
title=None,
sns_kwargs=None):
"""
Given a list of AgentManager, plot data (corresponding to info) obtained in each episode.
The dictionary returned by agents' .fit() method must contain a key equal to `info`.
Parameters
----------
agent_manager : AgentManager, or list of AgentManager
tag : str
Tag of data to plot.
fignum: string or int
Identifier of plot figure.
show: bool
If true, calls plt.show().
preprocess_func: Callable
Function to apply to 'tag' column before plot. For instance, if tag=episode_rewards,
setting preprocess_func=np.cumsum will plot cumulative rewards
title: str (Optional)
Optional title to plot. If None, set to tag.
sns_kwargs: dict
Optional extra params for seaborn lineplot.
"""
sns_kwargs = sns_kwargs or {'ci': 'sd'}
title = title or tag
if preprocess_func is not None:
ylabel = 'value'
else:
ylabel = tag
preprocess_func = preprocess_func or (lambda x: x)
agent_manager_list = agent_manager
if not isinstance(agent_manager_list, list):
agent_manager_list = [agent_manager_list]
# preprocess agent stats
data_list = []
for manager in agent_manager_list:
# Important: since manager can be a RemoteAgentManager,
# it is important to avoid repeated accesses to its methods and properties.
# That is why writer_data is taken from the manager instance only in the line below.
writer_data = manager.get_writer_data()
agent_name = manager.agent_name
if writer_data is not None:
for idx in writer_data:
df = writer_data[idx]
df = pd.DataFrame(df[df['tag'] == tag])
df['value'] = preprocess_func(df['value'].values)
# update name according to AgentManager name
df['name'] = agent_name
data_list.append(df)
if len(data_list) == 0:
logger.error('[plot_writer_data]: No data to be plotted.')
return
all_writer_data = pd.concat(data_list, ignore_index=True)
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys
import numpy as np
import pytest
from pandas.compat import PY310
from pandas.errors import (
EmptyDataError,
ParserError,
ParserWarning,
)
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
compat,
)
import pandas._testing as tm
from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self) -> None:
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser.engine = "c"
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("squeeze", [True, False])
def test_squeeze(all_parsers, squeeze):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv_check_warnings(
FutureWarning,
"The squeeze argument has been deprecated "
"and will be removed in a future version. "
'Append .squeeze\\("columns"\\) to the call to squeeze.\n\n',
StringIO(data),
index_col=0,
header=None,
squeeze=squeeze,
)
if not squeeze:
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
@xfail_pyarrow
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@xfail_pyarrow
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa:E501
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
@xfail_pyarrow
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check to see that the response of parser when faced with no
# provided columns raises the correct error, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa:E501
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
def test_read_filepath_or_buffer(all_parsers):
# see gh-43366
parser = all_parsers
with pytest.raises(TypeError, match="Expected file path name or file-like"):
parser.read_csv(filepath_or_buffer=b"input")
@xfail_pyarrow
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
# Skip for now; actually only one test fails, but it's tricky to xfail
@skip_pyarrow
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_table_same_signature_as_read_csv(all_parsers):
# GH-34976
parser = all_parsers
table_sign = signature(parser.read_table)
csv_sign = signature(parser.read_csv)
assert table_sign.parameters.keys() == csv_sign.parameters.keys()
assert table_sign.return_annotation == csv_sign.return_annotation
for key, csv_param in csv_sign.parameters.items():
table_param = table_sign.parameters[key]
if key == "sep":
assert csv_param.default == ","
assert table_param.default == "\t"
assert table_param.annotation == csv_param.annotation
assert table_param.kind == csv_param.kind
continue
else:
assert table_param == csv_param
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(
PY310,
reason="GH41935 This test is leaking only on Python 3.10,"
"causing other tests to fail with a cryptic error.",
)
@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
# GH#41069
parser = all_parsers
data = "a b\n0 1"
sys.setprofile(lambda *a, **k: None)
result = getattr(parser, read_func)(StringIO(data))
sys.setprofile(None)
expected = DataFrame({"a b": ["0 1"]})
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
def test_first_row_bom_unquoted(all_parsers):
# see gh-36343
parser = all_parsers
data = """\ufeffHead1\tHead2\tHead3"""
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
@xfail_pyarrow
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
@xfail_pyarrow
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
with tm.assert_produces_warning(ParserWarning):
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
def test_read_csv_names_not_accepting_sets(all_parsers):
# GH 34946
data = """\
1,2,3
4,5,6\n"""
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
@xfail_pyarrow
def test_read_table_delim_whitespace_default_sep(all_parsers):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
result = parser.read_table(f, delim_whitespace=True)
expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
def test_read_csv_delimiter_and_sep_no_default(all_parsers):
# GH#39823
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified a sep and a delimiter; you can only specify one."
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, sep=" ", delimiter=".")
@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
def test_read_csv_line_break_as_separator(kwargs, all_parsers):
# GH#43528
parser = all_parsers
data = """a,b,c
1,2,3
"""
msg = (
r"Specified \\n as separator or delimiter. This forces the python engine "
r"which does not accept a line terminator. Hence it is not allowed to use "
r"the line terminator as separator."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_read_csv_posargs_deprecation(all_parsers):
# GH 41485
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = (
"In a future version of pandas all arguments of read_csv "
"except for the argument 'filepath_or_buffer' will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
parser.read_csv(f, " ")
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
@pytest.mark.parametrize("func", ["read_csv", "read_table"])
def test_names_and_prefix_not_None_raises(all_parsers, func):
# GH#39123
f = StringIO("a,b\n1,2")
parser = all_parsers
msg = "Specified named and prefix; you can only specify one."
with pytest.raises(ValueError, match=msg):
        with tm.assert_produces_warning(FutureWarning):
            getattr(parser, func)(f, names=["a", "b"], prefix="x")  # assumed call body; the source row is truncated here
#!/usr/bin/env python3
# Author: <NAME>
import numpy as np
import pandas as pd
import gzip
import subprocess
import scipy.stats as stats
import argparse
import os
import feather
import rnaseqnorm
def gtf_to_bed(annotation_gtf, feature='gene', exclude_chrs=[]):
"""
Parse genes from GTF, create placeholder DataFrame for BED output
"""
chrom = []
start = []
end = []
gene_id = []
with open(annotation_gtf, 'r') as gtf:
for row in gtf:
row = row.strip().split('\t')
            if row[0][0]=='#' or row[2]!=feature: continue  # skip header/comment lines and non-matching features
chrom.append(row[0])
# TSS: gene start (0-based coordinates for BED)
if row[6]=='+':
start.append(np.int64(row[3])-1)
end.append(np.int64(row[3]))
elif row[6]=='-':
start.append(np.int64(row[4])-1) # last base of gene
end.append(np.int64(row[4]))
else:
raise ValueError('Strand not specified.')
gene_id.append(row[8].split(';',1)[0].split(' ')[1].replace('"',''))
    bed_df = pd.DataFrame(data={'chr':chrom, 'start':start, 'end':end, 'gene_id':gene_id}, columns=['chr', 'start', 'end', 'gene_id'], index=gene_id)
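    # Assumed continuation (the source row is truncated here): drop excluded
    # chromosomes -- `exclude_chrs` is otherwise unused above -- and return the table.
    bed_df = bed_df[~bed_df['chr'].isin(exclude_chrs)]
    return bed_df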
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 17:37:59 2020
@author: bernice
"""
#%% Final Project
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats, integrate
import seaborn as sns
df = pd.read_csv('middleSchoolData.csv')
#%% 1) What is the correlation between the number of applications
# and admissions to HSPHS?
df1 = df[['applications','acceptances']].dropna()
x = df1['applications']
y = df1['acceptances']
# %matplotlib inline
np.random.seed(20180514)
sns.distplot(x)
sns.distplot(y)
plt.scatter(x, y)
plt.title('A plot to show the correlation between applications and acceptances')
plt.xlabel('applications')
plt.ylabel('acceptances')
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='yellow')
plt.show()
correlation = np.corrcoef(x,y)
print(correlation)
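# Illustrative extra check (not part of the original write-up): scipy's pearsonr also
# returns a p-value, which makes the strength of the relationship easier to judge.
r_val, p_val = stats.pearsonr(x, y)
print('pearson r = {:.3f}, p-value = {:.3g}'.format(r_val, p_val))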
#%% 2)What is a better predictor of admission to HSPHS?
# Raw number of applications or application “rate”?
import statsmodels.api as sm
df2 = df[['applications','acceptances','school_size']].dropna()
rate1 = df2['applications'] / df2['school_size']
rate2 = df2['acceptances'] / df2['school_size'] # admission
y1 = df2['acceptances']
y2 = rate2
x1 = df2['applications']
x2 = rate1
sns.distplot(x1)
sns.distplot(x2)
# Z-score the data:
x1 = stats.zscore(x1)
x2 = stats.zscore(x2)
y1 = stats.zscore(y1)
y2 = stats.zscore(y2)
# visualize ( the same as 1)
plt.scatter(x1, y1)
plt.title('A plot to show the correlation between applications and acceptances')
plt.xlabel('applications')
plt.ylabel('acceptances')
plt.plot(np.unique(x1), np.poly1d(np.polyfit(x1, y1, 1))(np.unique(x1)), color='yellow')
plt.show()
# linear regression
x1 = sm.add_constant(x1) # vector of ones
model = sm.OLS(y1,x1) # ordinary least squares from sm
results = model.fit() # fit model
print(results.summary()) # print summary
print(results.params) # print parameters, beta0 beta1
# visualize
plt.scatter(x2, y2)
plt.title('A plot to show the correlation between applications rate and acceptances')
plt.xlabel('applications rate')
plt.ylabel('acceptances')
plt.plot(np.unique(x2), np.poly1d(np.polyfit(x2, y2, 1))(np.unique(x2)), color='yellow')
plt.show()
# linear regression
x2 = sm.add_constant(x2) # vector of ones
model = sm.OLS(y2,x2) # ordinary least squares from sm
results = model.fit() # fit model
print(results.summary()) # print summary
print(results.params) # print parameters, beta0 beta1
#%% 3) Which school has the best *per student* odds of sending someone to HSPHS?
df3 = df[['school_name','applications','acceptances','school_size']].dropna()
rate = df3['acceptances'] / df3['school_size']
odds = rate / (1 - rate)
df3['odds'] = odds
df3 = df3.sort_values(by=['odds'], ascending=False)
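# Quick illustrative peek at the answer (column names as defined above):
print(df3[['school_name', 'odds']].head(1))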
#%% 4) Is there a relationship between how students perceive their school (as reported in columns
# L-Q) and how the school performs on objective measures of achievement (as noted in
# columns V-X).
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
df4 = df[['rigorous_instruction','collaborative_teachers','supportive_environment',
'effective_school_leadership','strong_family_community_ties','trust',
'student_achievement','reading_scores_exceed','math_scores_exceed']].dropna()
# 1. find the principal components for School Climate
df_school = df4[['rigorous_instruction','collaborative_teachers','supportive_environment',
'effective_school_leadership','strong_family_community_ties','trust']]
# Compute correlation between each measure across all courses:
r = np.corrcoef(df_school.values,rowvar=False)
# Plot the data:
plt.imshow(r)
plt.colorbar()
scaled_data = preprocessing.scale(df_school)
pca = PCA()
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
eig_vals = pca.explained_variance_
labels = ['PC' + str(x) for x in range(1, len(eig_vals)+1)]
plt.bar(x=range(1,len(eig_vals)+1), height=eig_vals, tick_label=labels)
plt.ylabel('Eigenvalue')
plt.xlabel('Principal Component')
plt.title('Scree Plot for School Climate')
plt.plot(eig_vals)
plt.show()
# from the plot, we could choose the most important component based on the Kaiser criterion line
# 2. find the principal components for Objective Achievements
df_objective = df4[['student_achievement','reading_scores_exceed','math_scores_exceed']]
scaled_data2 = preprocessing.scale(df_objective)
pca2 = PCA()
pca2.fit(scaled_data2)
pca_data2 = pca2.transform(scaled_data2)
eig_vals2 = pca2.explained_variance_
labels2 = ['PC' + str(x) for x in range(1, len(eig_vals2)+1)]
plt.bar(x=range(1,len(eig_vals2)+1), height=eig_vals2, tick_label=labels2)
plt.ylabel('Eigenvalue')
plt.xlabel('Principal Component')
plt.title('Scree Plot for Objective Achievement')
plt.plot(eig_vals2)
plt.show()
# from the plot, we could choose the most important component based on the Kaiser criterion line
# find the relationship between them
import statsmodels.api as sm
y = pca_data2[:,0] # objective achievement
x = pca_data[:,0] # school performance
# visualize
plt.scatter(x, y)
plt.title('A plot to show the correlation between school climate and objective achievement')
plt.xlabel('school climate')
plt.ylabel('objective achievements')
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='yellow')
plt.show()
# linear regression
x = sm.add_constant(x) # vector of ones
model = sm.OLS(y,x) # ordinary least squares from sm
results = model.fit() # fit model
print(results.summary()) # print summary
print(results.params) # print parameters, beta0 beta1
# They are negatively correlated
#%% 5) Test a hypothesis of your choice as to which kind of school (e.g. small schools vs. large
# schools or charter schools vs. not (or any other classification, such as rich vs. poor school))
# performs differently than another kind either on some dependent measure,
# e.g. objective measures of achievement or admission to HSPHS (pick one).
# I will classify schools as small schools vs. large schools (split on school_size)
# and test whether their admission rates to HSPHS differ.
df5 = df[['dbn','school_name','applications','acceptances','school_size']].dropna()
admission = df5['acceptances'] / df5['school_size']
df5['admission'] = admission
df5 = df5.sort_values(by=['school_size'])
length = len(df5) # the length is even, so we can simply split the sorted dataframe
# into a small-school half and a large-school half
data = df5['admission'].values
small_schools = data[:int(length/2),]
large_schools = data[int(length/2):,]
# I try t-test for the two groups (independent t-test)
t,p = stats.ttest_ind(small_schools, large_schools) # independent t-test
# Considering that the samples may not come from a normally distributed population,
# I try a KS test to further confirm:
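# Sketch of that KS check (assumed two-sample form on the same groups as the t-test):
ks_stat, ks_p = stats.ks_2samp(small_schools, large_schools)
print(ks_stat, ks_p)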
# the p-value is smaller than 0.05, thus we can conclude that there is a significant difference between two groups
#%% 6) Is there any evidence that the availability of material resources (e.g. per student spending or class size)
# impacts objective measures of achievement or admission to HSPHS?
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
df6 = df[['applications','acceptances','per_pupil_spending','school_size']].values
imp = IterativeImputer(max_iter=10, random_state=0)
imp.fit(df6)
IterativeImputer(random_state=0)
df6 = imp.transform(df6)
dataset = pd.DataFrame({'applications':df6[:,0],'acceptances':df6[:,1],'per_pupil_spending':df6[:,2],'school_size':df6[:,3]})
admission = dataset['acceptances'] / dataset['school_size']
dataset['admission'] = admission
df_new = dataset[['per_pupil_spending','admission']]
data = df_new.values
# 1. linear regression and correlation
import statsmodels.api as sm
y = data[:,1] # admission
x = data[:,0] # student spending
# visualize
plt.scatter(x, y)
plt.title('A plot to show the correlation between spending and admission')
plt.xlabel('spending')
plt.ylabel('admission')
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)), color='yellow')
plt.show()
x = sm.add_constant(x) # vector of ones
model = sm.OLS(y,x) # ordinary least squares from sm
results = model.fit() # fit model
print(results.summary()) # print summary
print(results.params) # print parameters, beta0 beta1
# 2. anova based on different groups
df_new = df_new.sort_values(by=['per_pupil_spending'],ascending=True)
data_new = df_new.values
length = len(data_new)
index = round(length / 4)
sample1 = data_new[:index+1,1]
sample2 = data_new[index:(2*index+1),1]
sample3 = data_new[(2*index):(3*index+1),1]
sample4 = data_new[(3*index):,1]
sns.distplot(sample4)
sns.distplot(sample1)
sns.distplot(sample2)
sns.distplot(sample3)
f,p1 = stats.f_oneway(sample1,sample2,sample3,sample4) # one-way anova for 4 sample means
# 3. kruskal-wallis on different groups
h,p2 = stats.kruskal(sample1,sample2,sample3,sample4) # 4 sample medians
#%% 7) What proportion of schools accounts for 90% of all students accepted to HSPHS?
df7 = df[['dbn','school_name','applications','acceptances']].dropna()
df7 = df7.sort_values(by=['acceptances'],ascending=False)
data = df7['acceptances'].values
num_acceptances = 0.9 * np.sum(data)
analysis = np.empty(len(data))
analysis[:] = np.NAN
for i in range(len(data)):
analysis[i] = np.sum(data[:i+1])
index = np.argmin(abs(analysis - num_acceptances))
proportion = index / len(data)
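# Equivalent vectorised form of the running-total loop above (illustrative only):
analysis_alt = np.cumsum(data)
index_alt = np.argmin(abs(analysis_alt - num_acceptances))
proportion_alt = index_alt / len(data)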
#bar graph
labels = df7['dbn']
data_ = data[:21,]
plt.barh(range(len(data_)), data_, align='center', alpha=0.5)
plt.yticks(range(len(data_)), labels)
plt.xlabel('Acceptances Count')
plt.title('Students accepted to HSPHS')
plt.show()
#%% 8) Build a model of your choice – clustering, classification or prediction – that includes all
# factors – as to what school characteristics are most important in terms of a) sending
# students to HSPHS, b) achieving high scores on objective measures of achievement?
# I found that these data for per_pupil_spending are absent in charter school, so I decided to separate
# charter school and public school groups to make appropriate predictions.
# pip install fancyimpute
from fancyimpute import KNN
from sklearn.decomposition import PCA
from sklearn import preprocessing
# charter school
charter_school = df[df['school_name'].str.contains('CHARTER')]
temp1 = charter_school.drop(['per_pupil_spending','avg_class_size'],axis=1)
temp2 = temp1.drop(['dbn','school_name'],axis=1)
c_filled_knn = KNN(k=3).fit_transform(temp2)
for i in range(20):
help_column_sc = np.log(c_filled_knn[:,i]+1)
c_filled_knn[:,i] = preprocessing.normalize([help_column_sc])
# public school
index_ = len(charter_school)
public_school = df.drop(list(range(len(df)-index_,len(df))))
temp3 = public_school.drop(['dbn','school_name'],axis=1)
p_filled_knn = KNN(k=3).fit_transform(temp3)
for i in range(20):
help_column_p = np.log(p_filled_knn[:,i]+1)
p_filled_knn[:,i] = preprocessing.normalize([help_column_p])
# Charter School
# Compute correlation between each measure across all variables in CHARTER SCHOOLS:
r = np.corrcoef(c_filled_knn,rowvar=False)
# Plot the data:
plt.imshow(r)
plt.colorbar()
plt.title('correlation between each measure across all variables in CHARTER SCHOOL')
# As we learned in lecture and recitation, we first check which variables need to be
# reduced. The bright square in the middle of the correlation plot marks a block of
# highly correlated columns, so we need to do PCA as we already did in 4),
# and we can replace the school climate columns by the most important principal component
charter_school = pd.DataFrame(c_filled_knn,columns=['C','D','G','H','I','J','K','L','M','N','O','P','Q',
'R','S','T','U','V','W','X'])
# 1. find PC for school climate
school_climate = charter_school[['L','M','N','O','P','Q']].values
scaled_data = preprocessing.scale(school_climate)
pca = PCA()
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
eig_vals = pca.explained_variance_
labels = ['PC' + str(x) for x in range(1, len(eig_vals)+1)]
plt.bar(x=range(1,len(eig_vals)+1), height=eig_vals, tick_label=labels)
plt.ylabel('Eigenvalue')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.plot(eig_vals)
plt.plot([0,len(eig_vals)],[1,1],color='red',linewidth=1) # Kaiser criterion line
plt.show()
#2. find PC for objective achievements
objective_achievement = charter_school[['V','W','X']]
scaled_data2 = preprocessing.scale(objective_achievement)
pca2 = PCA()
pca2.fit(scaled_data2)
pca_data2 = pca2.transform(scaled_data2)
eig_vals2 = pca2.explained_variance_
labels2 = ['PC' + str(x) for x in range(1, len(eig_vals2)+1)]
plt.bar(x=range(1,len(eig_vals2)+1), height=eig_vals2, tick_label=labels2)
plt.ylabel('Eigenvalue')
plt.xlabel('Principal Component')
plt.title('Scree Plot')
plt.plot(eig_vals2)
plt.plot([0,len(eig_vals2)],[1,1],color='red',linewidth=1) # Kaiser criterion line
plt.show()
charter_school = charter_school.drop(['L','M','N','O','P','Q','V','W','X'],axis=1)
temp_sc = pca_data[:,0]
school_climate_new = pca_data[:,0]
charter_school['School Climate'] = school_climate_new
objective_achievement_new = pca_data2[:,0]
charter_school['Objective Achievement'] = objective_achievement_new
admission = charter_school['D'] / charter_school['U']
help_admission = np.log(admission+1)
admission = preprocessing.normalize([help_admission])
charter_school['Admission'] = admission.T
# do multiple linear regression: a) admission
from sklearn import linear_model
X = charter_school[['C','G','H','I','J','K','R','S','T','U','School Climate','Objective Achievement']].values
Y = charter_school['Admission'].values # admission
regr = linear_model.LinearRegression() # linearRegression function from linear_model
regr.fit(X,Y) # fit model
COD = regr.score(X,Y) # r^2
beta = regr.coef_ # beta
intercept = regr.intercept_ # intercept
analysis = pd.DataFrame(beta.T,columns=['Weight'])
IV = ['C','G','H','I','J','K','R','S','T','U','School Climate','Objective Achievement']
analysis['Name'] = IV
analysis = analysis.sort_values(by=['Weight'], ascending=False)
analysis['Absolute Weight'] = abs(beta.T)
analysis = analysis.sort_values(by=['Absolute Weight'], ascending=False)
#%%
# b) objective achievement
X = charter_school[['C','D','G','H','I','J','K','R','S','T','U','School Climate']].values
Y = charter_school['Objective Achievement'] # high scores on objective measures of achievement
regr = linear_model.LinearRegression() # linearRegression function from linear_model
regr.fit(X,Y) # fit model
COD = regr.score(X,Y) # r^2
beta = regr.coef_ # beta
intercept = regr.intercept_ # intercept
analysis = pd.DataFrame(beta.T, columns=['Weight'])
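# Assumed continuation mirroring part (a): label the predictors and rank them by |weight|.
IV = ['C','D','G','H','I','J','K','R','S','T','U','School Climate']
analysis['Name'] = IV
analysis['Absolute Weight'] = abs(beta.T)
analysis = analysis.sort_values(by=['Absolute Weight'], ascending=False)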
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: Eastmoney Data Center - New Stock Data - IPO subscription yield
Eastmoney Data Center - New Stock Data - IPO subscription yield (打新收益率)
http://data.eastmoney.com/xg/xg/dxsyl.html
Eastmoney Data Center - New Stock Data - new stock subscription and lottery-result lookup (新股申购与中签查询)
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription yield - total number of pages
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: total number of pages
    :rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
def stock_dxsyl_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - New Stock Data - IPO subscription yield
    http://data.eastmoney.com/xg/xg/dxsyl.html
    :return: IPO subscription yield data for the specified market
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_numeric(big_df["首日涨幅"])
big_df["打新收益"] = pd.to_numeric(big_df["打新收益"])
return big_df
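# Illustrative usage sketch (requires network access; output columns are listed above):
# stock_dxsyl_em_df = stock_dxsyl_em()
# print(stock_dxsyl_em_df.head())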
def stock_xgsglb_em(symbol: str = "京市A股") -> pd.DataFrame:
"""
    New stock subscription and lottery-result lookup (新股申购与中签查询)
    http://data.eastmoney.com/xg/xg/default_2.html
    :param symbol: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
    :type symbol: str
    :return: new stock subscription and lottery-result data
    :rtype: pandas.DataFrame
"""
market_map = {
"全部股票": """(APPLY_DATE>'2010-01-01')""",
"沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
"科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
"深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
"创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
}
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
if symbol == "京市A股":
params = {
'sortColumns': 'APPLY_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'columns': 'ALL',
'reportName': 'RPT_NEEQ_ISSUEINFO_LIST',
'quoteColumns': 'f14~01~SECURITY_CODE~SECURITY_NAME_ABBR',
'source': 'NEEQSELECT',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, 1+int(total_page)), leave=False):
params.update({
'pageNumber': page
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
'序号',
'-',
'代码',
'-',
'简称',
'申购代码',
'发行总数',
'-',
'发行价格',
'发行市盈率',
'申购日',
'发行结果公告日',
'上市日',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'网上申购缴款日',
'网上申购退款日',
'-',
'网上获配比例',
'最新价',
'首日收盘价',
'网下有效申购倍数',
'每百股获利',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'序号',
'代码',
'简称',
'申购代码',
'发行总数',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'发行价格',
'最新价',
'首日收盘价',
'申购日',
'网上申购缴款日',
'网上申购退款日',
'上市日',
'发行结果公告日',
'发行市盈率',
'网上获配比例',
'网下有效申购倍数',
'每百股获利',
]]
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行数量'] = pd.to_numeric(big_df['网上发行数量'])
big_df['顶格申购所需资金'] = pd.to_numeric(big_df['顶格申购所需资金'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_numeric(big_df['发行价格'])
big_df['最新价'] = pd.to_numeric(big_df['最新价'])
big_df['首日收盘价'] = pd.to_numeric(big_df['首日收盘价'])
big_df['发行市盈率'] = pd.to_numeric(big_df['发行市盈率'])
big_df['网上获配比例'] = pd.to_numeric(big_df['网上获配比例'])
big_df['网下有效申购倍数'] = pd.to_numeric(big_df['网下有效申购倍数'])
big_df['每百股获利'] = pd.to_numeric(big_df['每百股获利'])
big_df['申购日'] = pd.to_datetime(big_df['申购日']).dt.date
big_df['网上申购缴款日'] = pd.to_datetime(big_df['网上申购缴款日']).dt.date
big_df['网上申购退款日'] = pd.to_datetime(big_df['网上申购退款日']).dt.date
big_df['上市日'] = pd.to_datetime(big_df['上市日']).dt.date
big_df['发行结果公告日'] = pd.to_datetime(big_df['发行结果公告日']).dt.date
return big_df
else:
params = {
'sortColumns': 'APPLY_DATE,SECURITY_CODE',
'sortTypes': '-1,-1',
'pageSize': '5000',
'pageNumber': '1',
'reportName': 'RPTA_APP_IPOAPPLY',
'columns': 'SECURITY_CODE,SECURITY_NAME,TRADE_MARKET_CODE,APPLY_CODE,TRADE_MARKET,MARKET_TYPE,ORG_TYPE,ISSUE_NUM,ONLINE_ISSUE_NUM,OFFLINE_PLACING_NUM,TOP_APPLY_MARKETCAP,PREDICT_ONFUND_UPPER,ONLINE_APPLY_UPPER,PREDICT_ONAPPLY_UPPER,ISSUE_PRICE,LATELY_PRICE,CLOSE_PRICE,APPLY_DATE,BALLOT_NUM_DATE,BALLOT_PAY_DATE,LISTING_DATE,AFTER_ISSUE_PE,ONLINE_ISSUE_LWR,INITIAL_MULTIPLE,INDUSTRY_PE_NEW,OFFLINE_EP_OBJECT,CONTINUOUS_1WORD_NUM,TOTAL_CHANGE,PROFIT,LIMIT_UP_PRICE,INFO_CODE,OPEN_PRICE,LD_OPEN_PREMIUM,LD_CLOSE_CHANGE,TURNOVERRATE,LD_HIGH_CHANG,LD_AVERAGE_PRICE,OPEN_DATE,OPEN_AVERAGE_PRICE,PREDICT_PE,PREDICT_ISSUE_PRICE2,PREDICT_ISSUE_PRICE,PREDICT_ISSUE_PRICE1,PREDICT_ISSUE_PE,PREDICT_PE_THREE,ONLINE_APPLY_PRICE,MAIN_BUSINESS',
'filter': market_map[symbol],
'source': 'WEB',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
"股票代码",
"股票简称",
"_",
"申购代码",
"_",
"_",
"_",
"发行总数",
"网上发行",
"_",
"顶格申购需配市值",
"_",
"申购上限",
"_",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"中签率",
"询价累计报价倍数",
"_",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"行业市盈率",
"_",
"_",
"_",
]
big_df = big_df[
[
"股票代码",
"股票简称",
"申购代码",
"发行总数",
"网上发行",
"顶格申购需配市值",
"申购上限",
"发行价格",
"最新价",
"首日收盘价",
"申购日期",
"中签号公布日",
"中签缴款日期",
"上市日期",
"发行市盈率",
"行业市盈率",
"中签率",
"询价累计报价倍数",
"配售对象报价家数",
"连续一字板数量",
"涨幅",
"每中一签获利",
]
]
        big_df['申购日期'] = pd.to_datetime(big_df['申购日期'])
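        # Assumed continuation (the source row is truncated here): the remaining date
        # columns presumably follow the same conversion pattern before the frame is returned.
        big_df['中签号公布日'] = pd.to_datetime(big_df['中签号公布日'], errors='coerce')
        big_df['中签缴款日期'] = pd.to_datetime(big_df['中签缴款日期'], errors='coerce')
        big_df['上市日期'] = pd.to_datetime(big_df['上市日期'], errors='coerce')
        return big_df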
import os
import logging
import json
import glob
import collections
import yaml
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from mathtools import utils
logger = logging.getLogger(__name__)
def load_vocabs(vocab_fn):
def get_part_name(event_name):
return utils.remove_prefix(event_name, 'pick up ')
def get_action_name(event_name, part_vocab):
if event_name.startswith('align'):
return 'align'
if event_name.startswith('attach'):
return 'attach'
if event_name.startswith('position'):
return 'position'
if event_name.startswith('slide'):
return 'slide'
if event_name.startswith('insert'):
return 'insert'
if event_name == '':
return 'NA'
if event_name == 'other':
return event_name
for part_name in part_vocab:
if part_name != '' and event_name.endswith(part_name):
return utils.remove_suffix(event_name, f" {part_name}")
else:
raise AssertionError(f"No part in vocab matching {event_name}")
def get_event_tuple(event_name, part_vocab, action_vocab):
if event_name == '':
return ('NA', 'NA') + tuple(False for name in part_vocab if name != '')
for name in action_vocab:
if event_name.startswith(name):
action_name = name
break
else:
raise AssertionError(f"No action in vocab matching {event_name}")
if event_name == 'align leg screw with table thread':
part_name = 'leg'
elif event_name == 'align side panel holes with front panel dowels':
part_name = 'side panel'
elif event_name == 'attach shelf to table':
part_name = 'shelf'
elif event_name == 'position the drawer right side up':
part_name = 'drawer'
elif event_name == 'slide bottom of drawer':
part_name = 'bottom panel'
elif event_name in ('NA', 'other'):
part_name = ''
else:
for name in part_vocab:
if name != '' and event_name.endswith(name):
part_name = name
break
else:
raise AssertionError(f"No part in vocab matching {event_name}")
part_is_active = tuple(part_name == name for name in part_vocab if name != '')
return (event_name, action_name) + part_is_active
with open(vocab_fn, 'rt') as file_:
event_vocab = file_.read().split('\n')
part_vocab = ('',) + tuple(
get_part_name(event_label) for event_label in event_vocab
if event_label.startswith('pick up ')
) + ('table', 'drawer')
action_vocab = tuple(set(
get_action_name(event_label, part_vocab) for event_label in event_vocab
))
event_df = pd.DataFrame(
tuple(get_event_tuple(name, part_vocab, action_vocab) for name in event_vocab),
columns=['event', 'action'] + [f"{name}_active" for name in part_vocab if name != '']
)
event_df = event_df.set_index('event')
return event_df, part_vocab, action_vocab
def load_action_labels(label_fn, event_vocab):
with open(label_fn, 'r') as _file:
gt_segments = json.load(_file)
def get_metadata(seq_name, ann_seq):
furn_name, string = seq_name.split('/')
person, color, place = string.split('_')[:3]
dir_name = seq_name.replace('/', '_')
split_name = ann_seq['subset']['subset']
return (furn_name, person, color, place, split_name, dir_name)
ignore_seqs = (
'Lack_Side_Table_Special_Test',
)
metadata = pd.DataFrame(
tuple(
get_metadata(seq_name, ann_seq)
for seq_name, ann_seq in gt_segments['database'].items()
if seq_name not in ignore_seqs
),
columns=['furn_name', 'person', 'color', 'place', 'split_name', 'dir_name']
)
ann_seqs = {
seq_name.replace('/', '_'): [ann for ann in ann_seq['annotation']]
for seq_name, ann_seq in gt_segments['database'].items()
if seq_name not in ignore_seqs
}
def make_action_labels(ann_seq):
action_names = [d['label'] for d in ann_seq]
action_bounds = [d['segment'] for d in ann_seq]
action_labels = pd.concat(
(
event_vocab.loc[action_names].reset_index(),
pd.DataFrame(action_bounds, columns=['start', 'end'])
), axis=1
)
return action_labels
seq_names = tuple(ann_seqs.keys())
action_labels = tuple(make_action_labels(ann_seqs[seq_name]) for seq_name in seq_names)
return metadata.index.to_numpy(), action_labels, metadata
def plot_event_labels(
fn, event_index_seq, action_index_seq, part_activity_seq,
event_vocab, action_vocab, part_vocab):
f, axes = plt.subplots(3, sharex=True, figsize=(12, 12))
axes[0].plot(event_index_seq)
axes[0].set_yticks(range(len(event_vocab)))
axes[0].set_yticklabels(event_vocab)
axes[1].plot(action_index_seq)
axes[1].set_yticks(range(len(action_vocab)))
axes[1].set_yticklabels(action_vocab)
axes[2].imshow(part_activity_seq.T, interpolation='none', aspect='auto')
axes[2].set_yticks(range(len(part_vocab)))
axes[2].set_yticklabels(part_vocab)
plt.tight_layout()
plt.savefig(fn)
plt.close()
def make_labels(seg_bounds, seg_labels, default_val, num_samples=None):
if num_samples is None:
num_samples = seg_bounds.max() + 1
label_shape = (num_samples,) + seg_labels.shape[1:]
labels = np.full(label_shape, default_val, dtype=seg_labels.dtype)
for (start, end), l in zip(seg_bounds, seg_labels):
labels[start:end + 1] = l
return labels
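# Worked example for make_labels (hypothetical values):
# seg_bounds=[[0, 2], [5, 6]], seg_labels=[1, 2], default_val=0, num_samples=8
# -> array([1, 1, 1, 0, 0, 2, 2, 0])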
def make_event_data(
event_seq, filenames, event_to_index, action_to_index, part_vocab,
event_default, action_default, part_default):
event_indices = np.array([event_to_index[name] for name in event_seq['event']])
action_indices = np.array([action_to_index[name] for name in event_seq['action']])
part_names = [name for name in part_vocab if name != '']
col_names = [f"{name}_active" for name in part_names]
part_is_active = event_seq[col_names].values
seg_bounds = event_seq[['start', 'end']].values
event_index_seq = make_labels(
seg_bounds, event_indices, event_default,
num_samples=len(filenames)
)
action_index_seq = make_labels(
seg_bounds, action_indices, action_default,
num_samples=len(filenames)
)
part_activity_seq = make_labels(
seg_bounds, part_is_active, part_default,
num_samples=len(filenames)
)
data_and_labels = pd.DataFrame({
'fn': filenames,
'event': event_index_seq,
'action': action_index_seq
})
data_and_labels = pd.concat(
(data_and_labels, pd.DataFrame(part_activity_seq, columns=col_names)),
axis=1
)
return data_and_labels
def make_window_clips(event_data, event_vocab, action_vocab, stride=1, **win_params):
num_samples = event_data.shape[0]
win_indices = utils.slidingWindowSlices(event_data, stride=stride, **win_params)
d = {
# name: [utils.majorityVote(event_data.loc[indices][name]) for indices in win_indices]
name: [event_data.iloc[i][name] for i in range(0, num_samples, stride)]
for name in event_data.columns if name != 'fn'
}
d['event'] = [event_vocab[i] for i in d['event']]
d['action'] = [action_vocab[i] for i in d['action']]
d['start'] = [sl.start for sl in win_indices]
d['end'] = [min(sl.stop, event_data.shape[0]) - 1 for sl in win_indices]
window_clips = pd.DataFrame(d)
return window_clips
def make_slowfast_labels(segment_bounds, labels, fns, integerizer, col_format='standard'):
if col_format == 'standard':
col_dict = {
'video_name': fns[segment_bounds['start']].apply(
lambda x: os.path.dirname(x).split('/')[-1]
).to_list(),
'start_index': segment_bounds['start'].to_list(),
'end_index': segment_bounds['end'].to_list(),
'label_id': [integerizer[name] for name in labels.to_list()]
}
elif col_format == 'ikea_tk':
col_dict = {
'segment_id': [i for i, name in enumerate(labels.to_list())],
'label_id': [integerizer[name] for name in labels.to_list()],
'start_frame': fns[segment_bounds['start']].apply(
lambda x: int(os.path.splitext(os.path.basename(x))[0])
).to_list(),
'end_frame': fns[segment_bounds['end']].apply(
lambda x: int(os.path.splitext(os.path.basename(x))[0])
).to_list(),
'label_name': labels.to_list(),
'video_name': fns[segment_bounds['start']].apply(
lambda x: os.path.dirname(x).split('/')[-1]
).to_list(),
}
else:
accepted_args = ('standard', 'ikea_tk')
err_str = f"Unrecognized argument col_format={col_format}; expected one of {accepted_args}"
raise ValueError(err_str)
slowfast_labels = pd.DataFrame(col_dict)
return slowfast_labels
def getActivePart(part_activity_segs, part_labels):
is_active = part_activity_segs.to_numpy()
if (is_active.sum(axis=1) > 1).any():
raise AssertionError('Some columns have more than one active object!')
active_parts = [''] * len(part_activity_segs)
for row, col in zip(*is_active.nonzero()):
active_parts[row] = part_labels[col]
active_parts = pd.DataFrame({'part': active_parts})['part']
return active_parts
def main(
out_dir=None, data_dir=None, annotation_dir=None, frames_dir=None,
col_format='standard', win_params={}, slowfast_csv_params={},
label_types=('event', 'action', 'part')):
out_dir = os.path.expanduser(out_dir)
data_dir = os.path.expanduser(data_dir)
annotation_dir = os.path.expanduser(annotation_dir)
frames_dir = os.path.expanduser(frames_dir)
annotation_dir = os.path.join(annotation_dir, 'action_annotations')
logger = utils.setupRootLogger(filename=os.path.join(out_dir, 'log.txt'))
fig_dir = os.path.join(out_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
out_labels_dir = os.path.join(out_dir, 'labels')
if not os.path.exists(out_labels_dir):
os.makedirs(out_labels_dir)
data_dirs = {name: os.path.join(out_dir, f"{name}-dataset") for name in label_types}
for name, dir_ in data_dirs.items():
if not os.path.exists(dir_):
os.makedirs(dir_)
event_vocab_df, part_vocab, action_vocab = load_vocabs(
os.path.join(data_dir, 'ANU_ikea_dataset', 'indexing_files', 'atomic_action_list.txt')
)
event_vocab_df.to_csv(os.path.join(out_labels_dir, 'event-vocab.csv'))
event_vocab = event_vocab_df.index.tolist()
vocabs = {
'event': event_vocab,
'action': action_vocab,
'part': part_vocab
}
vocabs = {label_name: vocabs[label_name] for label_name in label_types}
for name, vocab in vocabs.items():
utils.saveVariable(vocab, 'vocab', data_dirs[name])
label_fn = os.path.join(annotation_dir, 'gt_segments.json')
seq_ids, event_labels, metadata = load_action_labels(label_fn, event_vocab_df)
utils.saveMetadata(metadata, out_labels_dir)
for name, dir_ in data_dirs.items():
utils.saveMetadata(metadata, dir_)
logger.info(f"Loaded {len(seq_ids)} sequences from {label_fn}")
part_names = [name for name in part_vocab if name != '']
col_names = [f"{name}_active" for name in part_names]
integerizers = {
label_name: {name: i for i, name in enumerate(label_vocab)}
for label_name, label_vocab in vocabs.items()
}
all_slowfast_labels_seg = collections.defaultdict(list)
all_slowfast_labels_win = collections.defaultdict(list)
counts = np.zeros((len(action_vocab), len(part_vocab)), dtype=int)
for i, seq_id in enumerate(seq_ids):
seq_id_str = f"seq={seq_id}"
seq_dir_name = metadata['dir_name'].loc[seq_id]
event_segs = event_labels[i]
if not event_segs.any(axis=None):
logger.warning(f"No event labels for sequence {seq_id}")
continue
event_data = make_event_data(
event_segs, sorted(glob.glob(os.path.join(frames_dir, seq_dir_name, '*.jpg'))),
integerizers['event'], integerizers['action'], integerizers['part'],
event_vocab.index('NA'), action_vocab.index('NA'), False
)
event_wins = make_window_clips(
event_data, vocabs['event'], vocabs['action'],
**win_params
)
event_data.to_csv(os.path.join(out_labels_dir, f"{seq_id_str}_data.csv"), index=False)
event_segs.to_csv(os.path.join(out_labels_dir, f"{seq_id_str}_segs.csv"), index=False)
filenames = event_data['fn'].to_list()
label_indices = {}
for name in label_types:
if name == 'part':
label_indices[name] = event_data[col_names].to_numpy()
seg_labels_slowfast = make_slowfast_labels(
event_segs[['start', 'end']], getActivePart(event_segs[col_names], part_names),
event_data['fn'], integerizers[name],
col_format=col_format
)
win_labels_slowfast = make_slowfast_labels(
event_wins[['start', 'end']], getActivePart(event_wins[col_names], part_names),
event_data['fn'], integerizers[name],
col_format=col_format
)
else:
label_indices[name] = event_data[name].to_numpy()
seg_labels_slowfast = make_slowfast_labels(
event_segs[['start', 'end']], event_segs[name],
event_data['fn'], integerizers[name],
col_format=col_format
)
win_labels_slowfast = make_slowfast_labels(
event_wins[['start', 'end']], event_wins[name],
event_data['fn'], integerizers[name],
col_format=col_format
)
utils.saveVariable(filenames, f'{seq_id_str}_frame-fns', data_dirs[name])
utils.saveVariable(label_indices[name], f'{seq_id_str}_labels', data_dirs[name])
seg_labels_slowfast.to_csv(
os.path.join(data_dirs[name], f'{seq_id_str}_slowfast-labels.csv'),
**slowfast_csv_params
)
win_labels_slowfast.to_csv(
                os.path.join(data_dirs[name], f'{seq_id_str}_slowfast-labels_win.csv'),  # separate file so the segment-level export above is not overwritten
**slowfast_csv_params
)
all_slowfast_labels_seg[name].append(seg_labels_slowfast)
all_slowfast_labels_win[name].append(win_labels_slowfast)
plot_event_labels(
os.path.join(fig_dir, f"{seq_id_str}.png"),
label_indices['event'], label_indices['action'], label_indices['part'],
event_vocab, action_vocab, part_names
)
for part_activity_row, action_index in zip(label_indices['part'], label_indices['action']):
for i, is_active in enumerate(part_activity_row):
part_index = integerizers['part'][part_names[i]]
counts[action_index, part_index] += int(is_active)
for name, labels in all_slowfast_labels_seg.items():
pd.concat(labels, axis=0).to_csv(
os.path.join(data_dirs[name], 'slowfast-labels_seg.csv'),
**slowfast_csv_params
)
for name, labels in all_slowfast_labels_win.items():
        pd.concat(labels, axis=0).to_csv(
            os.path.join(data_dirs[name], 'slowfast-labels_win.csv'),  # assumed window-level counterpart of the seg export above
            **slowfast_csv_params
        )
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2020, University of Oxford"
__email__ = "<EMAIL>"
__license__ = "MIT"
import pandas as pd
from haystac.workflow.scripts.utilities import REGEX_BLACKLIST
def entrez_pick_sequences(config, nuccore_file, taxa_file, output_file):
    accessions = pd.read_csv(nuccore_file, sep="\t")
#!/usr/bin/env python
# coding: utf-8
"""Modified version of 'eval_reco_trkx.py' (runs after 'tracks_from_gnn.py') script from the
exatrkx-iml2020. The code breakdown of the script is given in 'stt6_eval.ipynb' notebook."""
import os
import glob
import torch
import numpy as np
import pandas as pd
from typing import Any
device = 'cuda' if torch.cuda.is_available() else 'cpu'
class SttTorchDataReader(object):
"""Torch Geometric Data Reader from an Input Directory."""
def __init__(self, input_dir: str):
"""Initialize Instance Variables in Constructor"""
self.path = input_dir
all_files = sorted(glob.glob(os.path.join(input_dir, "*")))
self.nevts = len(all_files)
self.all_evtids = [os.path.basename(x) for x in all_files]
def read(self, evtid: int = None):
"""Read an Event from the Input Directory."""
event_fname = os.path.join(self.path, "{}".format(evtid))
event = torch.load(event_fname, map_location=device)
return event
def __call__(self, evtid: int, *args: Any, **kwds: Any) -> Any:
return self.read(evtid)
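# Illustrative usage (path is a placeholder, not a real directory name):
# reader = SttTorchDataReader("run/trkx_reco_eval/processed")
# event = reader(reader.all_evtids[0])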
def evaluate_reco_tracks(truth_df: pd.DataFrame,
reco_df: pd.DataFrame,
particles_df: pd.DataFrame,
min_hits_truth: int = 9,
min_hits_reco: int = 5,
# min_pt: float = 1.,
frac_reco_matched: float = 0.5,
frac_truth_matched=0.5,
**kwargs):
"""
Args:
truth_df: a dataframe with columns of ['hit_id', 'particle_id']
reco_df: a dataframe with columns of ['hit_id', 'track_id']
particles_df: a dataframe with columns of
['particle_id', 'pt', 'eta', 'radius', 'vz'].
where radius = sqrt(vx**2 + vy**2) and
['vx', 'vy', 'vz'] are the production vertex of the particle
min_hits_truth: minimum number of hits for truth tracks
min_hits_reco: minimum number of hits for reconstructed tracks
# min_pt: minimum pT to filter out
        frac_reco_matched: minimum fraction of a reco track's hits that must come from
            a single particle for that track to count as matched
        frac_truth_matched: minimum fraction of a true track's hits that must be shared
            with a single reco track for that particle to count as matched
Returns:
        A tuple of (
            num_true_tracks: int, number of true tracks
            num_reco_tracks: int, number of reconstructed tracks
            n_matched_particles: int, number of true particles matched by a reco track
            n_matched_tracks: int, number of reco tracks matched to a true particle
            n_duplicated_tracks: int, number of extra (duplicated) matched reco tracks
            n_matched_tracks_poi: int, matched reco tracks restricted to particles of interest
            particles_df: pd.DataFrame, particles with added `is_matched` and `is_trackable` flags
        )
"""
# just in case particle_id == 0 included in truth.
if 'particle_id' in truth_df.columns:
truth_df = truth_df[truth_df.particle_id > 0]
# get number of spacepoints in each reconstructed tracks
n_reco_hits = reco_df.track_id.value_counts(sort=False) \
.reset_index().rename(columns={"index": "track_id", "track_id": "n_reco_hits"})
# only tracks with a minimum number of spacepoints are considered
n_reco_hits = n_reco_hits[n_reco_hits.n_reco_hits >= min_hits_reco]
reco_df = reco_df[reco_df.track_id.isin(n_reco_hits.track_id.values)]
# get number of spacepoints in each particle
hits = truth_df.merge(particles_df, on='particle_id', how='left')
n_true_hits = hits.particle_id.value_counts(sort=False) \
.reset_index().rename(columns={"index": "particle_id", "particle_id": "n_true_hits"})
# only particles leaves at least min_hits_truth spacepoints
# and with pT >= min_pt are considered.
particles_df = particles_df.merge(n_true_hits, on=['particle_id'], how='left')
is_trackable = particles_df.n_true_hits >= min_hits_truth
    # event has 3 columns: [track_id, particle_id, hit_id]
event = pd.merge(reco_df, truth_df, on=['hit_id'], how='left')
# n_common_hits and n_shared should be exactly the same
# for a specific track id and particle id
# Each track_id will be assigned to multiple particles.
# To determine which particle the track candidate is matched to,
# we use the particle id that yields a maximum value of n_common_hits / n_reco_hits,
# which means the majority of the spacepoints associated with the reconstructed
# track candidate comes from that true track.
# However, the other way may not be true.
reco_matching = event.groupby(['track_id', 'particle_id']).size() \
.reset_index().rename(columns={0: "n_common_hits"})
# Each particle will be assigned to multiple reconstructed tracks
truth_matching = event.groupby(['particle_id', 'track_id']).size() \
.reset_index().rename(columns={0: "n_shared"})
# add number of hits to each of the maching dataframe
reco_matching = reco_matching.merge(n_reco_hits, on=['track_id'], how='left')
truth_matching = truth_matching.merge(n_true_hits, on=['particle_id'], how='left')
# calculate matching fraction
reco_matching = reco_matching.assign(
purity_reco=np.true_divide(reco_matching.n_common_hits, reco_matching.n_reco_hits))
truth_matching = truth_matching.assign(
purity_true=np.true_divide(truth_matching.n_shared, truth_matching.n_true_hits))
# select the best match
reco_matching['purity_reco_max'] = reco_matching.groupby(
"track_id")['purity_reco'].transform(max)
truth_matching['purity_true_max'] = truth_matching.groupby(
"track_id")['purity_true'].transform(max)
matched_reco_tracks = reco_matching[
(reco_matching.purity_reco_max >= frac_reco_matched)
& (reco_matching.purity_reco == reco_matching.purity_reco_max)]
matched_true_particles = truth_matching[
(truth_matching.purity_true_max >= frac_truth_matched)
& (truth_matching.purity_true == truth_matching.purity_true_max)]
# now, let's combine the two majority criteria
# reconstructed tracks must be in both matched dataframe
# and the so matched particle should be the same
# in this way, each track should be only assigned
combined_match = matched_true_particles.merge(
matched_reco_tracks, on=['track_id', 'particle_id'], how='inner')
num_reco_tracks = n_reco_hits.shape[0]
num_true_tracks = particles_df.shape[0]
    # For GNNs, there are non-negligible cases where GNN-based track candidates are
    # matched to particles not considered interesting, which means there can be
    # particles in matched_pids that do not exist in particles_df.
matched_pids = np.unique(combined_match.particle_id)
is_matched = particles_df.particle_id.isin(matched_pids).values
n_matched_particles = np.sum(is_matched)
n_matched_tracks = reco_matching[
reco_matching.purity_reco >= frac_reco_matched].shape[0]
n_matched_tracks_poi = reco_matching[
(reco_matching.purity_reco >= frac_reco_matched)
& (reco_matching.particle_id.isin(particles_df.particle_id.values))
].shape[0]
# print(n_matched_tracks_poi, n_matched_tracks)
# num_particles_matched_to = reco_matched.groupby("particle_id")['track_id']\
# .count().reset_index().rename(columns={"track_id": "n_tracks_matched"})
# n_duplicated_tracks = num_particles_matched_to.shape[0]
n_duplicated_tracks = n_matched_tracks_poi - n_matched_particles
particles_df = particles_df.assign(is_matched=is_matched, is_trackable=is_trackable)
return (num_true_tracks, num_reco_tracks, n_matched_particles,
n_matched_tracks, n_duplicated_tracks, n_matched_tracks_poi,
particles_df)
def run_one_evt(evtid, raw_trkx_data_reader, reco_trkx_data_reader, **kwargs):
print("Running {}".format(evtid))
# access torch data using reader's
raw_trkx_data = raw_trkx_data_reader(evtid)
reco_trkx_data = reco_trkx_data_reader(evtid)
# create truth, particles dataframes from torch data
_truth = pd.DataFrame({'hit_id': raw_trkx_data.hid.numpy(), 'particle_id': raw_trkx_data.pid.int().numpy()},
columns=['hit_id', 'particle_id'])
_particles = pd.DataFrame({'particle_id': raw_trkx_data.pid.int().numpy(), 'pt': raw_trkx_data.pt.numpy()},
columns=['particle_id', 'pt']).drop_duplicates(subset=['particle_id'])
results = evaluate_reco_tracks(_truth, reco_trkx_data, _particles, **kwargs)
return results[:-1] + (results[-1].assign(evtid=evtid),)
# %%
if __name__ == '__main__':
import time
import argparse
from multiprocessing import Pool
from functools import partial
parser = argparse.ArgumentParser(description='Evaluating tracking reconstruction')
add_arg = parser.add_argument
add_arg('--reco-tracks-path', help='path to reconstructed tracks', required=True)
add_arg('--raw-tracks-path', help='path to raw tracking data (for truth info)', required=True)
add_arg('--outname', help='output name without postfix', required=True)
add_arg('--max-evts', help='maximum number of events', type=int, default=1)
add_arg('-e', '--event-id', help='evaluate a particular event', type=int, default=None)
add_arg('-f', '--force', help='force to over write existing file', action='store_true')
add_arg("--num-workers", help='number of workers', default=1, type=int)
add_arg("--min-hits-truth", help='minimum number of hits in a truth track',
default=7, type=int)
add_arg("--min-hits-reco", help='minimum number of hits in a reconstructed track',
default=4, type=int)
add_arg('--min-pt', help='minimum pT of true track', type=float, default=1.0)
add_arg("--frac-reco-matched", help='fraction of matched hits over total hits in a reco track',
default=0.5, type=float)
add_arg("--frac-truth-matched", help='fraction of matched hits over total hits in a truth track',
default=0.5, type=float)
args = parser.parse_args()
reco_track_path = args.reco_tracks_path
num_workers = args.num_workers
outname = args.outname
outdir = os.path.dirname(os.path.abspath(outname))
os.makedirs(outdir, exist_ok=True)
# read reconstructed tracks
reco_trkx_reader = SttTorchDataReader(args.reco_tracks_path)
n_tot_files = reco_trkx_reader.nevts
all_evtids = reco_trkx_reader.all_evtids
max_evts = args.max_evts if 0 < args.max_evts <= n_tot_files else n_tot_files
print("Out of {} events processing {} events with {} workers".format(
n_tot_files, max_evts, args.num_workers))
print("Output directory:", outdir)
# read raw Torch/CSV files to get truth information
raw_trkx_reader = SttTorchDataReader(args.raw_tracks_path)
out_array = '{}_particles.h5'.format(outname)
if os.path.exists(out_array) and not args.force:
print("{} is there, use -f to overwrite the file".format(out_array))
exit(1)
if not args.event_id:
if num_workers > 1:
with Pool(num_workers) as p:
fnc = partial(run_one_evt,
raw_trkx_reader=raw_trkx_reader,
reco_trkx_reader=reco_trkx_reader,
**vars(args))
res = p.map(fnc, all_evtids[:max_evts])
else:
res = [run_one_evt(evtid, raw_trkx_reader, reco_trkx_reader, **vars(args))
for evtid in all_evtids[:max_evts]]
# merge results from each process
n_true_tracks = sum([x[0] for x in res])
n_reco_tracks = sum([x[1] for x in res])
n_matched_true_tracks = sum([x[2] for x in res])
n_matched_reco_tracks = sum([x[3] for x in res])
n_duplicated_reco_tracks = sum([x[4] for x in res])
n_matched_reco_tracks_poi = sum([x[5] for x in res])
particles = pd.concat([x[-1] for x in res], axis=0)
else:
(n_true_tracks, n_reco_tracks, n_matched_true_tracks, n_matched_reco_tracks,
n_duplicated_reco_tracks, n_matched_reco_tracks_poi, particles) = \
run_one_evt(args.event_id, raw_trkx_reader, reco_trkx_reader, **vars(args))
    with pd.HDFStore(out_array, 'w') as store:
        store['particles'] = particles  # assumed key name; the source is truncated at this point
# Copyright 2021 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import DoubleType, StringType
import pandas as pd
import numpy as np
import os
from fink_science.conversion import mag2fluxcal_snana
from fink_science.utilities import load_scikit_model, load_pcs
from fink_science.kilonova.lib_kn import extract_all_filters_fink
from fink_science.kilonova.lib_kn import get_features_name
from fink_science import __file__
from fink_science.tester import spark_unit_tests
@pandas_udf(DoubleType(), PandasUDFType.SCALAR)
def knscore(jd, fid, magpsf, sigmapsf, model_path=None, pcs_path=None, npcs=None) -> pd.Series:
""" Return the probability of an alert to be a Kilonova using a Random
Forest Classifier.
Parameters
----------
jd: Spark DataFrame Column
JD times (float)
fid: Spark DataFrame Column
Filter IDs (int)
magpsf, sigmapsf: Spark DataFrame Columns
Magnitude from PSF-fit photometry, and 1-sigma error
model_path: Spark DataFrame Column, optional
Path to the trained model. Default is None, in which case the default
model `data/models/KN_model_2PC.pkl` is loaded.
pcs_path: Spark DataFrame Column, optional
Path to the Principal Component file. Default is None, in which case
the `data/models/components.csv` is loaded.
npcs: Spark DataFrame Column, optional
Integer representing the number of Principal Component to use. It
should be consistent to the training model used. Default is None (i.e.
default npcs for the default `model_path`, that is 1).
Returns
----------
probabilities: 1D np.array of float
Probability between 0 (non-KNe) and 1 (KNe).
Examples
----------
>>> from fink_science.utilities import concat_col
>>> from pyspark.sql import functions as F
>>> df = spark.read.load(ztf_alert_sample)
# Required alert columns
>>> what = ['jd', 'fid', 'magpsf', 'sigmapsf']
# Use for creating temp name
>>> prefix = 'c'
>>> what_prefix = [prefix + i for i in what]
# Append temp columns with historical + current measurements
>>> for colname in what:
... df = concat_col(df, colname, prefix=prefix)
# Perform the fit + classification (default model)
>>> args = [F.col(i) for i in what_prefix]
>>> df = df.withColumn('pKNe', knscore(*args))
# Note that we can also specify a model
>>> extra_args = [F.lit(model_path), F.lit(comp_path), F.lit(2)]
>>> args = [F.col(i) for i in what_prefix] + extra_args
>>> df = df.withColumn('pKNe', knscore(*args))
# Drop temp columns
>>> df = df.drop(*what_prefix)
>>> df.agg({"pKNe": "min"}).collect()[0][0]
0.0
>>> df.agg({"pKNe": "max"}).collect()[0][0] < 1.0
True
"""
epoch_lim = [-50, 50]
time_bin = 0.25
flux_lim = 0
# Flag empty alerts
mask = magpsf.apply(lambda x: np.sum(np.array(x) == np.array(x))) > 1
if len(jd[mask]) == 0:
return pd.Series(np.zeros(len(jd), dtype=float))
# add an exploded column with SNID
df_tmp = pd.DataFrame.from_dict(
{
'jd': jd[mask],
'SNID': range(len(jd[mask]))
}
)
df_tmp = df_tmp.explode('jd')
# compute flux and flux error
data = [mag2fluxcal_snana(*args) for args in zip(
magpsf[mask].explode(),
sigmapsf[mask].explode())]
flux, error = np.transpose(data)
# make a Pandas DataFrame with exploded series
pdf = pd.DataFrame.from_dict({
'SNID': df_tmp['SNID'],
'MJD': df_tmp['jd'],
'FLUXCAL': flux,
'FLUXCALERR': error,
'FLT': fid[mask].explode().replace({1: 'g', 2: 'r'})
})
# Load pre-trained model `clf`
if model_path is not None:
model = load_scikit_model(model_path.values[0])
else:
curdir = os.path.dirname(os.path.abspath(__file__))
model_path = curdir + '/data/models/KN_model_2PC.pkl'
model = load_scikit_model(model_path)
# Load pcs
if npcs is not None:
npcs = int(npcs.values[0])
else:
npcs = 2
if pcs_path is not None:
pcs_path_ = pcs_path.values[0]
else:
curdir = os.path.dirname(os.path.abspath(__file__))
pcs_path_ = curdir + '/data/models/components.csv'
pcs = load_pcs(pcs_path_, npcs=npcs)
test_features = []
filters = ['g', 'r']
# extract features (all filters) for each ID
for id in np.unique(pdf['SNID']):
pdf_sub = pdf[pdf['SNID'] == id]
pdf_sub = pdf_sub[pdf_sub['FLUXCAL'] == pdf_sub['FLUXCAL']]
features = extract_all_filters_fink(
epoch_lim=epoch_lim, pcs=pcs,
time_bin=time_bin, filters=filters,
lc=pdf_sub, flux_lim=flux_lim)
test_features.append(features)
# Remove pathological values
names_root = [
'npoints_',
'residuo_'
] + [
'coeff' + str(i + 1) + '_' for i in range(len(pcs.keys()))
] + ['maxflux_']
columns = [i + j for j in ['g', 'r'] for i in names_root]
matrix = pd.DataFrame(test_features, columns=columns)
zeros = np.logical_or(
matrix['coeff1_g'].values == 0,
matrix['coeff1_r'].values == 0
)
matrix_clean = matrix[~zeros]
# If all alerts are flagged as bad
if np.shape(matrix_clean) == (0, len(get_features_name(npcs))):
to_return = np.zeros(len(jd), dtype=float)
return pd.Series(to_return)
# Otherwise make predictions
probabilities = model.predict_proba(matrix_clean.values)
probabilities_notkne = np.zeros(len(test_features))
probabilities_kne = np.zeros(len(test_features))
probabilities_notkne[~zeros] = probabilities.T[0]
probabilities_kne[~zeros] = probabilities.T[1]
probabilities_ = np.array([probabilities_notkne, probabilities_kne]).T
    # Keep only the probability of being a kilonova (KNe)
to_return = np.zeros(len(jd), dtype=float)
to_return[mask] = probabilities_.T[1]
    return pd.Series(to_return)
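# The `spark_unit_tests` helper imported above is normally exercised from a
# __main__ guard that runs this module's docstring examples on Spark. The
# sketch below is an assumption about how such a guard could look; the globals
# it fills in (`ztf_alert_sample`, `model_path`, `comp_path`) are hypothetical
# placeholders for sample data shipped alongside the package.
if __name__ == "__main__":
    globs = globals()
    curdir = os.path.dirname(os.path.abspath(__file__))
    # Hypothetical paths: point these at the sample alerts and model files.
    globs["ztf_alert_sample"] = os.path.join(curdir, "data/alerts/sample.parquet")
    globs["model_path"] = os.path.join(curdir, "data/models/KN_model_2PC.pkl")
    globs["comp_path"] = os.path.join(curdir, "data/models/components.csv")
    # Run the Spark-backed doctests defined in this module.
    spark_unit_tests(globs)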
# pylint: disable=W0102
import unittest
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series, Timestamp
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert(left.items.equals(right.items))
assert(left.ref_items.equals(right.ref_items))
def get_float_mat(n, k, dtype):
return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0)
TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2']
N = 10
def get_float_ex(cols=['a', 'c', 'e'], dtype = np.float_):
floats = get_float_mat(N, len(cols), dtype = dtype).T
return make_block(floats, cols, TEST_COLS)
def get_complex_ex(cols=['h']):
complexes = (get_float_mat(N, 1, dtype = np.float_).T * 1j).astype(np.complex128)
return make_block(complexes, cols, TEST_COLS)
def get_obj_ex(cols=['b', 'd']):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
return make_block(mat.T, cols, TEST_COLS)
def get_bool_ex(cols=['f']):
mat = np.ones((N, 1), dtype=bool)
return make_block(mat.T, cols, TEST_COLS)
def get_int_ex(cols=['g'], dtype = np.int_):
mat = randn(N, 1).astype(dtype)
return make_block(mat.T, cols, TEST_COLS)
def get_dt_ex(cols=['h']):
mat = randn(N, 1).astype(int).astype('M8[ns]')
return make_block(mat.T, cols, TEST_COLS)
def get_sparse_ex1():
sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
return make_block(sa1, ['s1'], TEST_COLS)
def get_sparse_ex2():
sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0)
return make_block(sa2, ['s2'], TEST_COLS)
def create_blockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
index_sz = blocks[0].shape[1]
return BlockManager(blocks, [items, np.arange(index_sz)])
def create_singleblockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
return SingleBlockManager(blocks, [items])
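# Usage sketch (not part of the original suite): the helpers above are the
# building blocks used throughout these tests -- example blocks are assembled
# into a BlockManager, which can then be wrapped in a DataFrame. All names
# below are the fixtures defined earlier in this file.
def _example_manager_roundtrip():
    mgr = create_blockmanager([get_float_ex(), get_bool_ex()])
    # Wrapping the manager in a DataFrame exercises the same path the tests use.
    return DataFrame(mgr)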
class TestBlock(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.fblock = get_float_ex()
self.cblock = get_complex_ex()
self.oblock = get_obj_ex()
self.bool_block = get_bool_ex()
self.int_block = get_int_ex()
def test_constructor(self):
int32block = get_int_ex(['a'],dtype = np.int32)
self.assert_(int32block.dtype == np.int32)
def test_pickle(self):
import pickle
def _check(blk):
pickled = pickle.dumps(blk)
unpickled = pickle.loads(pickled)
assert_block_equal(blk, unpickled)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_ref_locs(self):
assert_almost_equal(self.fblock.ref_locs, [0, 2, 4])
def test_attrs(self):
self.assert_(self.fblock.shape == self.fblock.values.shape)
self.assert_(self.fblock.dtype == self.fblock.values.dtype)
self.assert_(len(self.fblock) == len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = ['e', 'a', 'b', 'd', 'f']
ablock = make_block(avals, ['e', 'b'], ref_cols)
bblock = make_block(bvals, ['a', 'd'], ref_cols)
merged = ablock.merge(bblock)
exvals = np.vstack((avals, bvals))
excols = ['e', 'b', 'a', 'd']
eblock = make_block(exvals, excols, ref_cols)
eblock = eblock.reindex_items_from(ref_cols)
assert_block_equal(merged, eblock)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assert_(cop is not self.fblock)
assert_block_equal(self.fblock, cop)
def test_items(self):
cols = self.fblock.items
self.assert_(np.array_equal(cols, ['a', 'c', 'e']))
cols2 = self.fblock.items
self.assert_(cols is cols2)
def test_assign_ref_items(self):
new_cols = Index(['foo', 'bar', 'baz', 'quux', 'hi'])
self.fblock.set_ref_items(new_cols)
self.assert_(np.array_equal(self.fblock.items,
['foo', 'baz', 'hi']))
def test_reindex_index(self):
pass
def test_reindex_items_from(self):
new_cols = Index(['e', 'b', 'c', 'f'])
reindexed = self.fblock.reindex_items_from(new_cols)
assert_almost_equal(reindexed.ref_locs, [0, 2])
self.assertEquals(reindexed.values.shape[0], 2)
self.assert_((reindexed.values[0] == 2).all())
self.assert_((reindexed.values[1] == 1).all())
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.delete('a')
assert_almost_equal(newb.ref_locs, [2, 4])
self.assert_((newb.values[0] == 1).all())
newb = self.fblock.delete('c')
assert_almost_equal(newb.ref_locs, [0, 4])
self.assert_((newb.values[1] == 2).all())
newb = self.fblock.delete('e')
assert_almost_equal(newb.ref_locs, [0, 2])
self.assert_((newb.values[1] == 1).all())
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
bs = list(bblock.split_block_at('f'))
self.assertEqual(len(bs), 0)
def test_unicode_repr(self):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
cols = ['b', u("\u05d0")]
str_repr = repr(make_block(mat.T, cols, TEST_COLS))
def test_get(self):
pass
def test_set(self):
pass
def test_fillna(self):
pass
def test_repr(self):
pass
class TestBlockManager(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.blocks = [get_float_ex(),
get_obj_ex(),
get_bool_ex(),
get_int_ex(),
get_complex_ex()]
all_items = [b.items for b in self.blocks]
items = sorted(all_items[0].append(all_items[1:]))
items = Index(items)
for b in self.blocks:
b.ref_items = items
self.mgr = BlockManager(self.blocks, [items, np.arange(N)])
def test_constructor_corner(self):
pass
def test_attrs(self):
self.assertEquals(self.mgr.nblocks, len(self.mgr.blocks))
self.assertEquals(len(self.mgr), len(self.mgr.items))
def test_is_mixed_dtype(self):
self.assert_(self.mgr.is_mixed_type)
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
self.assert_(not mgr.is_mixed_type)
def test_is_indexed_like(self):
self.assert_(self.mgr._is_indexed_like(self.mgr))
mgr2 = self.mgr.reindex_axis(np.arange(N - 1), axis=1)
self.assert_(not self.mgr._is_indexed_like(mgr2))
def test_block_id_vector_item_dtypes(self):
expected = [0, 1, 0, 1, 0, 2, 3, 4]
result = self.mgr.block_id_vector
assert_almost_equal(expected, result)
result = self.mgr.item_dtypes
# as the platform may not exactly match this, pseudo match
expected = ['float64', 'object', 'float64', 'object', 'float64',
'bool', 'int64', 'complex128']
        for e, r in zip(expected, result):
            # assert on the dtype kind so platform-specific widths don't matter
            self.assert_(np.dtype(e).kind == np.dtype(r).kind)
def test_duplicate_item_failure(self):
items = Index(['a', 'a'])
blocks = [get_bool_ex(['a']), get_float_ex(['a'])]
for b in blocks:
b.ref_items = items
# test trying to create _ref_locs with/o ref_locs set on the blocks
self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)])
blocks[0].set_ref_locs([0])
blocks[1].set_ref_locs([1])
mgr = BlockManager(blocks, [items, np.arange(N)])
mgr.iget(1)
# invalidate the _ref_locs
for b in blocks:
b._ref_locs = None
mgr._ref_locs = None
mgr._items_map = None
self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True)
def test_contains(self):
self.assert_('a' in self.mgr)
self.assert_('baz' not in self.mgr)
def test_pickle(self):
import pickle
pickled = pickle.dumps(self.mgr)
mgr2 = pickle.loads(pickled)
# same result
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
self.assert_(mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_get(self):
pass
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item)[i]
assert_almost_equal(res, exp)
def test_set(self):
pass
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assert_(self.mgr.get('baz').dtype == np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assert_(mgr2.get('baz').dtype == np.object_)
mgr2.set('quux', randn(N).astype(int))
self.assert_(mgr2.get('quux').dtype == np.int_)
mgr2.set('quux', randn(N))
self.assert_(mgr2.get('quux').dtype == np.float_)
def test_copy(self):
shallow = self.mgr.copy(deep=False)
        # we don't guarantee block ordering
for blk in self.mgr.blocks:
found = False
for cp_blk in shallow.blocks:
if cp_blk.values is blk.values:
found = True
break
self.assert_(found == True)
def test_sparse(self):
mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2()])
# what to test here?
self.assert_(mgr.as_matrix().dtype == np.float64)
def test_sparse_mixed(self):
mgr = create_blockmanager([get_sparse_ex1(),get_sparse_ex2(),get_float_ex()])
self.assert_(len(mgr.blocks) == 3)
self.assert_(isinstance(mgr,BlockManager))
# what to test here?
def test_as_matrix_float(self):
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)])
self.assert_(mgr.as_matrix().dtype == np.float64)
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16)])
self.assert_(mgr.as_matrix().dtype == np.float32)
def test_as_matrix_int_bool(self):
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
self.assert_(mgr.as_matrix().dtype == np.bool_)
mgr = create_blockmanager([get_int_ex(['a'],np.int64), get_int_ex(['b'],np.int64), get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ])
self.assert_(mgr.as_matrix().dtype == np.int64)
mgr = create_blockmanager([get_int_ex(['c'],np.int32), get_int_ex(['d'],np.int16), get_int_ex(['e'],np.uint8) ])
self.assert_(mgr.as_matrix().dtype == np.int32)
def test_as_matrix_datetime(self):
mgr = create_blockmanager([get_dt_ex(['h']), get_dt_ex(['g'])])
self.assert_(mgr.as_matrix().dtype == 'M8[ns]')
def test_astype(self):
# coerce all
mgr = create_blockmanager([get_float_ex(['c'],np.float32), get_float_ex(['d'],np.float16), get_float_ex(['e'],np.float64)])
for t in ['float16','float32','float64','int32','int64']:
tmgr = mgr.astype(t)
self.assert_(tmgr.as_matrix().dtype == np.dtype(t))
# mixed
mgr = create_blockmanager([get_obj_ex(['a','b']),get_bool_ex(['c']),get_dt_ex(['d']),get_float_ex(['e'],np.float32), get_float_ex(['f'],np.float16), get_float_ex(['g'],np.float64)])
for t in ['float16','float32','float64','int32','int64']:
tmgr = mgr.astype(t, raise_on_error = False).get_numeric_data()
self.assert_(tmgr.as_matrix().dtype == np.dtype(t))
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
self.assert_(len(old_blocks) == len(new_blocks))
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
self.assert_(found == True)
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
self.assert_(found == True)
# noops
mgr = create_blockmanager([get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
mgr = create_blockmanager([get_obj_ex(['a','b']), get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert()
_compare(mgr,new_mgr)
        # there could actually be multiple dtypes resulting
def _check(new_mgr,block_type, citems):
items = set()
for b in new_mgr.blocks:
if isinstance(b,block_type):
for i in list(b.items):
items.add(i)
self.assert_(items == set(citems))
# convert
mat = np.empty((N, 3), dtype=object)
mat[:, 0] = '1'
mat[:, 1] = '2.'
mat[:, 2] = 'foo'
b = make_block(mat.T, ['a','b','foo'], TEST_COLS)
mgr = create_blockmanager([b, get_int_ex(['f']), get_float_ex(['g'])])
new_mgr = mgr.convert(convert_numeric = True)
_check(new_mgr,FloatBlock,['b','g'])
_check(new_mgr,IntBlock,['a','f'])
mgr = create_blockmanager([b, get_int_ex(['f'],np.int32), get_bool_ex(['bool']), get_dt_ex(['dt']),
get_int_ex(['i'],np.int64), get_float_ex(['g'],np.float64), get_float_ex(['h'],np.float16)])
new_mgr = mgr.convert(convert_numeric = True)
_check(new_mgr,FloatBlock,['b','g','h'])
_check(new_mgr,IntBlock,['a','f','i'])
_check(new_mgr,ObjectBlock,['foo'])
_check(new_mgr,BoolBlock,['bool'])
_check(new_mgr,DatetimeBlock,['dt'])
def test_xs(self):
pass
def test_interleave(self):
pass
def test_interleave_non_unique_cols(self):
df = DataFrame([
[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
np.testing.assert_array_equal(df_unique.values, df.values)
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self):
self.mgr.set('f', randn(N))
self.mgr.set('d', randn(N))
self.mgr.set('b', randn(N))
self.mgr.set('g', randn(N))
self.mgr.set('h', randn(N))
cons = self.mgr.consolidate()
self.assertEquals(cons.nblocks, 1)
self.assert_(cons.blocks[0].items.equals(cons.items))
def test_reindex_index(self):
pass
def test_reindex_items(self):
def _check_cols(before, after, cols):
for col in cols:
assert_almost_equal(after.get(col), before.get(col))
# not consolidated
vals = randn(N)
self.mgr.set('g', vals)
reindexed = self.mgr.reindex_items(['g', 'c', 'a', 'd'])
self.assertEquals(reindexed.nblocks, 2)
assert_almost_equal(reindexed.get('g'), vals.squeeze())
_check_cols(self.mgr, reindexed, ['c', 'a', 'd'])
def test_xs(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mgr.set_axis(1, index)
result = self.mgr.xs('bar', axis=1)
expected = self.mgr.get_slice(slice(3, 5), axis=1)
assert_frame_equal(DataFrame(result), DataFrame(expected))
def test_get_numeric_data(self):
int_ser = Series(np.array([0, 1, 2]))
float_ser = Series(np.array([0., 1., 2.]))
complex_ser = Series(np.array([0j, 1j, 2j]))
str_ser = Series(np.array(['a', 'b', 'c']))
bool_ser = Series(np.array([True, False, True]))
obj_ser = Series(np.array([1, 'a', 5]))
dt_ser = Series(tm.makeDateIndex(3))
# check types
df = DataFrame({'int': int_ser, 'float': float_ser,
'complex': complex_ser, 'str': str_ser,
'bool': bool_ser, 'obj': obj_ser,
'dt': dt_ser})
xp = DataFrame({'int': int_ser, 'float': float_ser,
'complex': complex_ser, 'bool': bool_ser})
rs = DataFrame(df._data.get_numeric_data())
assert_frame_equal(xp, rs)
xp = DataFrame({'bool': bool_ser})
rs = DataFrame(df._data.get_bool_data())
assert_frame_equal(xp, rs)
rs = DataFrame(df._data.get_bool_data())
df.ix[0, 'bool'] = not df.ix[0, 'bool']
self.assertEqual(rs.ix[0, 'bool'], df.ix[0, 'bool'])
rs = DataFrame(df._data.get_bool_data(copy=True))
df.ix[0, 'bool'] = not df.ix[0, 'bool']
self.assertEqual(rs.ix[0, 'bool'], not df.ix[0, 'bool'])
def test_missing_unicode_key(self):
df = | DataFrame({"a": [1]}) | pandas.DataFrame |
from butterfree.data import loader
from collections import defaultdict
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
import pandas as pd
import os
import re
import numpy as np
import torch
from scipy import linalg
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
cols = loader.all_columns_metaphlan()
def coo(A):
rows = []
columns = []
values = []
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if A[i,j]:
rows.append(i)
columns.append(j)
values.append(A[i,j])
return rows, columns, values
def coo_tensor(A):
rows, columns, values = coo(A)
return torch.tensor([rows, columns], dtype=torch.long)
def get_eigenvectors(taxonomies):
"""
Returns eigenvectors and eigenvalues of Graph Laplacian.
"""
G = phylogenetic_tree(taxonomies)
G2 = G.to_undirected()
A = nx.convert_matrix.to_numpy_matrix(G2)
# Align nodes with taxonomic names
nodes = [x for x in G2.nodes()]
indices = reorder_columns(nodes, taxonomies)
    A = A[indices, :][:, indices]  # Shuffle rows and columns of adjacency matrix
# Compute laplacian
L = nx.laplacian_matrix(G2)
L = L[indices, :][:, indices] # Shuffle rows and columns of laplacian matrix
L = L.toarray()
# Compute eigenvectors and eigenvalues
W, V = linalg.eigh(L)
return A, L, W, V
def reorder_columns(nodes, taxonomies):
"""
Returns a list of indices to use to shuffle the elements of nodes such that shuffled(nodes) == taxonomies
"""
    taxa = [t.split('|')[-1] for t in taxonomies]
if set(nodes) != set(taxa):
raise ValueError("Elements of both lists must be the same.")
indices = [taxa.index(x) for x in nodes]
return indices
def phylogenetic_tree(taxonomies):
relationships = defaultdict(lambda : [])
unaccounted_for = []
for taxa in taxonomies:
components = taxa.split('|')
child = components[-1]
if len(components) > 1:
parent = components[-2]
relationships[parent].append(child)
else:
unaccounted_for.append(child)
G = nx.DiGraph()
for child in unaccounted_for:
G.add_node(child)
for parent, children in relationships.items():
for child in children:
G.add_edge(parent, child)
return G
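# Minimal usage sketch (assumption): the taxonomy strings below are
# hypothetical MetaPhlAn-style labels, with every intermediate rank listed so
# the tree contains all parent nodes. `get_eigenvectors` returns the adjacency
# matrix, the graph Laplacian, and its eigenvalues/eigenvectors aligned with
# the input order.
def _example_spectral_decomposition():
    example_taxa = [
        'k__Bacteria',
        'k__Bacteria|p__Firmicutes',
        'k__Bacteria|p__Bacteroidetes',
    ]
    A, L, W, V = get_eigenvectors(example_taxa)
    # W holds eigenvalues in ascending order; V holds the matching eigenvectors.
    return W, V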
def map_values_to_graph(graph, values_dict):
values = [values_dict.get(node, 0) for node in graph.nodes()]
return values
def plot_filtered(df, threshold, multiply=400, ax=None):
""" Extracts rows where abs > threshold and plots tree along with in between vertices """
filtered = df.loc[abs(df[1])>threshold]
cleaned_index = set()
for x in filtered.index:
        components = x.split('|')
for i in range(len(components)):
cleaned_index.add('|'.join(components[:i+1]))
cleaned = df.loc[cleaned_index]
cleaned = cleaned*multiply
plot_tree(cleaned, ax=ax)
def plot_tree(df, ax=None):
graph = phylogenetic_tree(df.index)
values_dict = {k:v[0] for k,v in zip(df.index, df.values)}
mapped_values = map_values_to_graph(graph, values_dict)
pos = graphviz_layout(graph, prog='twopi')
nx.draw(graph, pos, cmap=plt.get_cmap('viridis'), node_color=mapped_values, with_labels=True, font_color='black', arrows=True, ax=ax)
def plot_intersection(df_dict, threshold, ax=None):
"""
Given a dict of {label:df} containing attribution values, finds the index containing rows that are above threshold for every
label and plots that tree.
"""
thresholded = {k: df.loc[abs(df[1])>threshold] for k,df in df_dict.items()}
intersection = None
for k, v in thresholded.items():
if intersection is None:
intersection = v.index
else:
intersection = intersection.intersection(v.index)
    df = pd.DataFrame(index=intersection)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
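# Illustrative sketch (not used by the tests): for two sids with a single
# knowledge date each, the helper forward-fills the estimates across the
# requested date range and indexes the result by (at_date, knowledge_date).
# The sids, values, and dates below are hypothetical.
def _example_expected_df():
    tuples = [
        (0, 100.0, pd.Timestamp("2015-01-05")),
        (10, 110.0, pd.Timestamp("2015-01-07")),
    ]
    return create_expected_df_for_factor_compute(
        pd.Timestamp("2015-01-05"),  # start_date
        [0, 10],                     # sids
        tuples,
        pd.Timestamp("2015-01-09"),  # end_date
    )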
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that estimates for a single day are loaded correctly for
        multiple columns.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the correct estimates
    for a single day across multiple columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the correct estimates
    for a single day across multiple columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
# split-adjsuted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
# split-adjusted-asof-date we well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, | pd.Timestamp("2015-01-20") | pandas.Timestamp |
from typing import Tuple
import streamlit as st
import pandas as pd
import altair as alt
from codex.utils import to_columnar
from codex.measure import measure_vars1, measure_vars2
from codex.collection import load_collection, get_width_height_pixels
STEPS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DEFAULT_SCENARIO_ID = 7
NUM_SCENARIOS = DEFAULT_SCENARIO_ID + 1
@st.cache
def load_data_frame(scenario_id: int,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
# load the base matrix, and the callable that transforms it
co_mat_original, transform = load_collection(scenario_id)
steps1 = []
props1 = []
names1 = []
steps2 = []
props2 = []
names2 = []
transformations = []
for step in STEPS:
# transform matrix
co_mat_transformed = transform(co_mat_original, step)
# measure properties
props1i, names1i = measure_vars1(co_mat_transformed)
props2i, names2i = measure_vars2(co_mat_transformed)
# collect for line chart 1
steps1 += [step] * len(props1i)
props1 += props1i
names1 += names1i
# collect for line chart 2
steps2 += [step] * len(props2i)
props2 += props2i
names2 += names2i
# collect transformed matrix for heat chart
transformations.append(co_mat_transformed)
df1 = | pd.DataFrame(data={'Step': steps1, 'Proportion': props1, 'Quantity': names1}) | pandas.DataFrame |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = | pd.DataFrame(expect_collection_noExpand['pad']) | pandas.DataFrame |
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = | concat([frames[k] for k in sorted_keys], keys=sorted_keys) | pandas.concat |
# -*- coding: utf-8 -*-
import json
import os
from typing import Optional, Union, Iterator, List
from functools import partial
import pystow
import pandas as pd
from tqdm.auto import tqdm
from prodec import Descriptor, Transform
from .utils.IO import locate_file, process_data_version, TypeDecoder
def read_papyrus(is3d: bool = False, version: str = 'latest', chunksize: Optional[int] = None, source_path: Optional[str] = None) -> Union[
Iterator[pd.DataFrame], pd.DataFrame]:
"""Read the Papyrus dataset.
:param is3d: whether to consider stereochemistry or not (default: False)
:param version: version of the dataset to be read
:param chunksize: number of lines per chunk. To read without chunks, set to None
:param source_path: folder containing the bioactivity dataset (default: pystow's home folder)
:return: the Papyrus activity dataset
"""
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
version = process_data_version(version=version, root_folder=source_path)
source_path = pystow.module('papyrus', version)
# Load data types
dtype_file = source_path.join(name='data_types.json').as_posix()
with open(dtype_file, 'r') as jsonfile:
dtypes = json.load(jsonfile, cls=TypeDecoder)['papyrus']
# Find the file
filenames = locate_file(source_path.base.as_posix(),
f'*.*_combined_set_with{"out" if not is3d else ""}_stereochemistry.tsv*')
return pd.read_csv(filenames[0], sep='\t', chunksize=chunksize, dtype=dtypes, low_memory=True)
def read_protein_set(source_path: Optional[str] = None, version: str = 'latest') -> pd.DataFrame:
"""Read the protein targets of the Papyrus dataset.
:param source_path: folder containing the molecular descriptor datasets
:param version: version of the dataset to be read
:return: the set of protein targets in the Papyrus dataset
"""
version = process_data_version(version=version, root_folder=source_path)
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
source_path = pystow.module('papyrus', version)
# Find the file
filenames = locate_file(source_path.base.as_posix(), f'*.*_combined_set_protein_targets.tsv*')
return pd.read_csv(filenames[0], sep='\t', keep_default_na=False)
def read_molecular_descriptors(desc_type: str = 'mold2', is3d: bool = False,
version: str = 'latest', chunksize: Optional[int] = None,
source_path: Optional[str] = None):
"""Get molecular descriptors
:param desc_type: type of descriptor {'mold2', 'mordred', 'cddd', 'fingerprint', 'all'}
:param is3d: whether to load descriptors of the dataset containing stereochemistry
:param version: version of the dataset to be read
:param chunksize: number of lines per chunk. To read without chunks, set to None
:param source_path: folder containing the molecular descriptor datasets
:return: the dataframe of molecular descriptors
"""
if desc_type not in ['mold2', 'mordred', 'cddd', 'fingerprint', 'moe', 'all']:
raise ValueError("descriptor type not supported")
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
version = process_data_version(version=version, root_folder=source_path)
source_path = pystow.module('papyrus', version)
# Load data types
dtype_file = source_path.join(name='data_types.json').as_posix()
with open(dtype_file, 'r') as jsonfile:
dtypes = json.load(jsonfile, cls=TypeDecoder)
# Find the files
if desc_type in ['mold2', 'all']:
mold2_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_mold2.tsv*')
elif desc_type in ['mordred', 'all']:
mordd_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_mordred{3 if is3d else 2}D.tsv*')
elif desc_type in ['cddd', 'all']:
cddds_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_CDDDs.tsv*')
elif desc_type in ['fingerprint', 'all']:
molfp_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_{"E3FP" if is3d else "ECFP6"}.tsv*')
elif desc_type in ['moe', 'all']:
moe_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_MOE.tsv*')
if desc_type == 'mold2':
return pd.read_csv(mold2_files[0], sep='\t', dtype=dtypes['mold2'], low_memory=True, chunksize=chunksize)
elif desc_type == 'mordred':
return pd.read_csv(mordd_files[0], sep='\t', dtype=dtypes[f'mordred_{3 if is3d else 2}D'], low_memory=True,
chunksize=chunksize)
elif desc_type == 'cddd':
return pd.read_csv(cddds_files[0], sep='\t', dtype=dtypes['CDDD'], low_memory=True, chunksize=chunksize)
elif desc_type == 'fingerprint':
return pd.read_csv(molfp_files[0], sep='\t', dtype=dtypes[f'{"E3FP" if is3d else "ECFP6"}'], low_memory=True,
chunksize=chunksize)
elif desc_type == 'moe':
return pd.read_csv(moe_files[0], sep='\t', low_memory=True, chunksize=chunksize)
elif desc_type == 'all':
mold2 = pd.read_csv(mold2_files[0], sep='\t', dtype=dtypes['mold2'], low_memory=True, chunksize=chunksize)
mordd = pd.read_csv(mordd_files[0], sep='\t', dtype=dtypes[f'mordred_{3 if is3d else 2}D'], low_memory=True,
chunksize=chunksize)
cddds = pd.read_csv(cddds_files[0], sep='\t', dtype=dtypes['CDDD'], low_memory=True, chunksize=chunksize)
molfp = pd.read_csv(molfp_files[0], sep='\t', dtype=dtypes[f'{"E3FP" if is3d else "ECFP6"}'], low_memory=True,
chunksize=chunksize)
moe = pd.read_csv(moe_files[0], sep='\t', low_memory=True, chunksize=chunksize)
if chunksize is None:
mold2.set_index('InChIKey' if is3d else 'connectivity', inplace=True)
mordd.set_index('InChIKey' if is3d else 'connectivity', inplace=True)
molfp.set_index('InChIKey' if is3d else 'connectivity', inplace=True)
cddds.set_index('InChIKey' if is3d else 'connectivity', inplace=True)
moe.set_index('InChIKey' if is3d else 'connectivity', inplace=True)
data = | pd.concat([mold2, mordd, cddds, molfp, moe], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# Copyright (c) May 2021, Wageningen Environmental Research
# <NAME> (<EMAIL>)
import sys, os
import xarray as xr
import pandas as pd
CMD_MODE = True if os.environ["CMD_MODE"] == "1" else False
from .util import create_agera5_fnames, convert_to_celsius
def extract_point(agera5_dir, point, startday, endday, tocelsius=False):
df_final = pd.DataFrame()
for day in pd.date_range(startday, endday):
fnames = create_agera5_fnames(agera5_dir, day)
ds = xr.open_mfdataset(fnames)
pnt = ds.sel(lon=point.longitude, lat=point.latitude, method="nearest")
df = pnt.to_dataframe()
df.reset_index(inplace=True)
ix = df.Wind_Speed_10m_Mean.notnull()
if not any(ix):
print(f"No data for given lon/lat ({point.longitude:7.2f}/{point.latitude:7.2f}),"
f" probably over water...")
if CMD_MODE:
sys.exit()
else:
return None
df_final = df_final.append(df[ix])
# convert to simple date
df_final['time'] = | pd.to_datetime(df_final.time) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 2021, last edited 27 Oct 2021
Fiber flow emissions calculations module - class version
Inputs:
Excel file with old PPI market & emissions data ('FiberModelAll_Python_v3-yields.xlsx')
Outputs:
Dict of keys 'old','new','forest','trade' with emissions calcs
(*testing inputs*
x = 'FiberModelAll_Python_v2.xlsx'
f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
pbpVolOld.columns = [x[:-2] for x in pbpVolOld.columns]
consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=34, nrows=3, index_col=0)
rLevel = pd.read_excel(x, 'Demand', usecols="F:K", skiprows=16, nrows=5)
rLevel = {t: list(rLevel[t][np.isfinite(rLevel[t])].values) for t in fProd}
fProd = [t for t in f2pVolOld.iloc[:,:6].columns]
fProdM = [t for t in f2pVolOld.iloc[:,:7].columns]
rFiber = f2pVolOld.index[:16]
vFiber = f2pVolOld.index[16:]
rPulp = [p for p in pbpVolOld.index if 'Rec' in p]
vPulp = [q for q in pbpVolOld.index if 'Vir' in q]
fPulp = [f for f in pbpVolOld.index]
import numpy as np
f2pYld = pd.read_excel(x, 'Fiber', usecols="I:O", skiprows=1, nrows=21)
f2pYld.index = np.concatenate([rFiber.values, vFiber.values], axis=0)
pulpYld = pd.read_excel(x, 'Pulp', usecols="D", skiprows=1, nrows=14)
pulpYld.index = rPulp + vPulp
transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
rsdlbio = rsdlbio.fillna(0)
rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
rsdlfos = rsdlfos.fillna(0)
woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
exportOld.iloc[:,:-1] = exportOld.iloc[:,:-1]
exportNew = exportOld.iloc[:,:-1] * 1.5
exportNew.columns = ['exportNew']
exportNew = exportNew.assign(TransCode=exportOld['TransCode'].values)
fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
)
@author: <NAME>
"""
import pandas as pd
import numpy as np
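# A minimal usage sketch (hypothetical, for orientation only): the scenario inputs
# (fProd, rLevel, f2pYld, pulpYld, f2pVolNew, pbpVolNew, consCollNew, exportNew,
# demandNew) are assumed to be prepared upstream by the market model; the names here
# are illustrative, not part of this module.
#
#   em = en_emissions('FiberModelAll_Python_v3-yields.xlsx', fProd, rLevel, f2pYld,
#                     pulpYld, f2pVolNew, pbpVolNew, consCollNew, exportNew, demandNew)
#   results = em.calculateEmissions()  # per the module docstring, a dict keyed 'old'/'new'/'forest'/'trade'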
class en_emissions(): # energy & emissions
def __init__(cls,xls,fProd,rLevel,f2pYld,pulpYld,f2pVolNew,pbpVolNew,consCollNew,exportNew,demandNew):
# xls (str) - name of Excel spreadsheet to pull data from
# fProd (list) - list of products in current scenario
# rLevel (df) - recycled content level by product
# f2pYld (df) - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld (df) - pulp to product yield; pulp as index
        # f2pVolNew (df) - fiber to pulp volume (in short tons); indexed by fiber code
        # pbpVolNew (df) - pulp by product volume; indexed by pulp name
        # consCollNew (df) - domestic consumption, collection, and recovery by product
        # exportNew (df) - new recovered-fiber exports from the US; indexed by recovered fiber code
        # demandNew (df) - new demand by product; indexed by rec level
        uC = 0.907185 # unit conversion factor: US short tons to metric tons (Mg)
cls.fProd = fProd
cls.fProdM = fProd + ['Market']
cls.rLevel = rLevel
cls.f2pYld = f2pYld
cls.pulpYld = pulpYld
cls.f2pVolNew = f2pVolNew * uC
cls.pbpVolNew = pbpVolNew * uC
cls.consCollNew = consCollNew * uC
cls.exportNew = exportNew * uC
cls.demandNew = {t: demandNew[t] * uC for t in demandNew.keys()}
with pd.ExcelFile(xls) as x:
# Old data
cls.f2pVolOld = pd.read_excel(x, 'OldData', usecols="A:I", skiprows=1, nrows=21, index_col=0)
cls.f2pVolOld.iloc[:,:-1] = cls.f2pVolOld.iloc[:,:-1] * uC * 1000
cls.f2pVolNew = cls.f2pVolNew.assign(TransCode=cls.f2pVolOld['TransCode'].values)
cls.pbpVolOld = pd.read_excel(x, 'OldData', usecols="K:R", skiprows=1, nrows=14, index_col=0)
cls.pbpVolOld.columns = [x[:-2] for x in cls.pbpVolOld.columns] # has .1 after column names for pandas duplicate
cls.pbpVolOld.iloc[:,:-1] = cls.pbpVolOld.iloc[:,:-1] * uC * 1000
cls.pbpVolNew = cls.pbpVolNew.assign(TransCode=cls.pbpVolOld['TransCode'].values)
cls.prodLD = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=19, nrows=5, index_col=0) * uC * 1000
cls.prodDemand = pd.read_excel(x, 'OldData', usecols="A:G", skiprows=26, nrows=1, index_col=0) * uC * 1000
cls.consCollOld = pd.read_excel(x, 'OldData', usecols="K:Q", skiprows=29, nrows=3, index_col=0) * uC * 1000
cls.exportOld = pd.read_excel(x, 'OldData', usecols="E:G", skiprows=31, nrows=16, index_col=0)
cls.exportOld.iloc[:,:-1] = cls.exportOld.iloc[:,:-1] * uC * 1000
cls.exportNew = cls.exportNew.assign(TransCode=cls.exportOld['TransCode'].values)
cls.fiberType = pd.read_excel(x, 'OldData', usecols="A:B", skiprows=31, nrows=20, index_col=0)
cls.rFiber = cls.f2pVolOld.index[:16]
cls.vFiber = cls.f2pVolOld.index[16:]
cls.rPulp = [p for p in cls.pbpVolOld.index if 'Rec' in p]
cls.vPulp = [q for q in cls.pbpVolOld.index if 'Vir' in q]
cls.fPulp = [f for f in cls.pbpVolOld.index]
# Emissions Info
cls.chemicals = pd.read_excel(x, 'nonFiber', usecols="A:B,E:L", skiprows=2, nrows=42, index_col=0)
cls.eolEmissions = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=2, nrows=3, index_col=0)
cls.bfEI = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=2, nrows=3, index_col=0)
cls.bfEI.columns = [x[:-2] for x in cls.bfEI.columns] # has .1 after column names for some reason
cls.bioPct = pd.read_excel(x, 'EmTables', usecols="J:P", skiprows=8, nrows=2, index_col=0)
cls.pwpEI = pd.read_excel(x, 'EmTables', usecols="O:P", skiprows=14, nrows=5, index_col=0)
cls.bfCO2 = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=9, nrows=2, index_col=0)
cls.fuelTable = pd.read_excel(x, 'EmTables', usecols="A:M", skiprows=15, nrows=13, index_col=0)
cls.fuelTable = cls.fuelTable.fillna(0)
cls.rsdlModes = pd.read_excel(x, 'EmTables', usecols="A:G", skiprows=32, nrows=6, index_col=0)
cls.rsdlbio = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=41, nrows=4, index_col=0)
cls.rsdlbio = cls.rsdlbio.fillna(0)
cls.rsdlfos = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=48, nrows=4, index_col=0)
cls.rsdlfos = cls.rsdlfos.fillna(0)
cls.transPct = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=32, nrows=11, index_col=0)
cls.transKM = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=46, nrows=11, index_col=0)
cls.transUMI = pd.read_excel(x, 'EmTables', usecols="L:P", skiprows=59, nrows=1, index_col=0)
cls.woodint = pd.read_excel(x, 'EmTables', usecols="A:H", skiprows=58, nrows=1, index_col=0)
cls.wtotalGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=62, nrows=6, index_col=0)
cls.wtotalGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=71, nrows=6, index_col=0)
cls.wbioGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=80, nrows=6, index_col=0)
cls.wbioGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=89, nrows=6, index_col=0)
cls.wfosGHGb0 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=98, nrows=6, index_col=0)
cls.wfosGHGb1 = pd.read_excel(x, 'EmTables', usecols="A:K", skiprows=107, nrows=6, index_col=0)
cls.chinaVals = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=66, nrows=3, index_col=0)
cls.chinaCons = pd.read_excel(x, 'EmTables', usecols="L:M", skiprows=72, nrows=6, index_col=0)
cls.fYield = pd.read_excel(x, 'EmTables', usecols="L:N", skiprows=81, nrows=5, index_col=0)
def calculateTrans(cls,transVol):
# transVol [df] - item, volume (in Mg) by product, TransCode; indexed by fiberCode or other label
# transPct [df] - % traversed for transMode by transCode; indexed by transCode
# transKM [df] - distance traversed for transMode by transCode; indexed by transCode
# transUMI [s] - unit impact by mode (truck, train, boat); indexed by "transUMI"
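        # Transport impact per product sums over modes (truck, train, boat):
        #   volume (Mg) * share moved by mode (transPct) * distance (transKM) * unit impact per Mg-km (transUMI)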
transImpact = pd.Series(0, index = cls.fProd)
tC = transVol['TransCode']
        tC = tC[(tC != 0) & (tC != 1)] # keep only rows with an applicable transport code (codes 0 and 1 are skipped)
transVol = transVol.loc[tC.index]
for t in cls.fProd:
for m in cls.transUMI.columns:
transImpact[t] += sum(transVol[t] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values * 1)
return transImpact
def calculateChem(cls,chemicals,prodDemand):
# chemicals [df] - nonfiber name, % use by product, transCode, impact factor; indexed by number
# prodDemand [df] - total demand; indexed by product
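        # Chemical impact per product = total product demand * nonfiber use share * impact factor;
        # the implied chemical volumes are then also routed through the transport calculation.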
chemImpact = pd.Series(0, index = cls.fProd, name = 'chemImp')
chemVol = pd.DataFrame(0, index = chemicals.index, columns = cls.fProd)
for t in cls.fProd:
chemImpact[t] = sum(prodDemand[t].values * chemicals[t] * chemicals['Impact Factor'])
chemVol[t] = chemicals[t] * prodDemand[t].values
chemVol = chemVol.join(chemicals['TransCode'])
chemTrans = pd.Series(cls.calculateTrans(chemVol), name = 'chemTrans')
chemImpact = pd.DataFrame(chemImpact)
return pd.concat([chemImpact, chemTrans], axis=1)
def calculateEoL(cls,eolEmissions,consColl):
# eolEmissions [df] - biogenic and fossil CO2 emission factors & transportation code by product; indexed by bio/fosCO2
# consColl [df] - domestic consumption, collection, and recovery by product; indexed by name
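        # prod2landfill = consumed but never recovered; mrf2landfill = collected but rejected at the MRF.
        # Biogenic EoL emissions scale with prod2landfill; fossil EoL adds transport of the MRF rejects.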
prod2landfill = pd.Series(consColl.loc['Domestic Consumption'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'prod2landfill')
mrf2landfill = pd.Series(consColl.loc['Collection Volume'] - consColl.loc['Recovery Volume'],
index = cls.fProd, name = 'mrf2landfill')
bioEoL = pd.Series(prod2landfill * eolEmissions.loc['bioCO2'], index = cls.fProd, name = 'bioEoL')
mrf2landfill = pd.DataFrame(mrf2landfill) # works b/c all prods have same TransCode
transEoL = pd.Series(cls.calculateTrans(mrf2landfill.T.assign(TransCode=eolEmissions.loc['TransCode'].values[0])),
index = cls.fProd, name = 'eolTrans')
fesTransEoL = pd.Series(prod2landfill * eolEmissions.loc['fossilCO2'] + transEoL, index = cls.fProd,
name = 'fesTransEoL')
bftEoL = pd.Series(bioEoL + fesTransEoL, name = 'bftEoL')
return pd.concat([bioEoL, fesTransEoL, bftEoL, transEoL], axis=1)
def getEnergyYldCoeff(cls,f2pVol,pbpVol):
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
        # PYCoeff [s] - pulp yield coefficient; indexed by pulp
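        # PYCoeff is total fiber input divided by pulp used for each pulp grade; grades with
        # no production are set to 0 rather than left as inf/NaN.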
f2pByPulp = pd.Series(0, index = pbpVol.index, name = 'fiber2pulp')
for p in cls.rPulp:
f2pByPulp[p] = sum([f2pVol.loc[cls.rFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
f2pByPulp[q] = sum([f2pVol.loc[cls.vFiber,t].sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpProd = pd.Series([pbpVol.loc[i].sum() for i in pbpVol.index], index = pbpVol.index, name = 'pulpProd')
PYCoeff = (pd.Series(f2pByPulp / pulpProd, name = 'pulpYldCoeff'))
PYCoeff.replace([np.inf, -np.inf], np.nan, inplace=True)
PYCoeff = PYCoeff.fillna(0)
return PYCoeff
def getEnergyPulpPct(cls,pbpVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
#
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
pulpPct = pbpVol.copy().drop(['TransCode'], axis=1)
for t in pulpPct.columns:
rTotalPulp = pulpPct.loc[cls.rPulp,t].sum()
vTotalPulp = pulpPct.loc[cls.vPulp,t].sum()
pulpPct.loc[cls.rPulp,t] = pulpPct.loc[cls.rPulp,t] / rTotalPulp
pulpPct.loc[cls.vPulp,t] = pulpPct.loc[cls.vPulp,t] / vTotalPulp
return pulpPct.fillna(0)
def getEnergyMultiProd(cls,PYMult,pulpPct):
# PYMult [s] - pulp yield multiplier; indexed by pulp name
# pulpPct [df] - % of rec/vir pulp used in product; indexed by pulp name
#
        # (return) [df] - rec/vir yield multiprod by product; indexed by r/vYldMultiProd
rYldMultiProd = pd.Series([sum(pulpPct.loc[cls.rPulp,t] * PYMult[cls.rPulp]) for t in cls.fProd],
index = cls.fProd, name = 'rYldMultiProd')
vYldMultiProd = pd.Series([sum(pulpPct.loc[cls.vPulp,t] * PYMult[cls.vPulp]) for t in cls.fProd],
index = cls.fProd, name = 'vYldMultiProd')
rYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
vYldMultiProd.replace([np.inf, -np.inf], np.nan, inplace=True)
return pd.concat([rYldMultiProd.fillna(0), vYldMultiProd.fillna(0)], axis=1)
def calculateEnergy(cls,pbpVol,prodLD,multiProd,pwpEI,paperEI):
# prodLD (df) - demand by product; indexed by % recycled content level
# bfEI (df) - bio & fes energy intensity fitting parameters by product; indexed by name
# bioPct (df) - bio fitting parameter for PWP; indexed by name
# pwpEI (df) - energy intensity of PWP pulp; indexed by pulp name
# paperEI (df) - paper production energy intensity; indexed by 'PPE'
# pbpVol (df) - pulp by product (in Mg); indexed by pulp name
# multiProd (df) - rec/vir yield multiprod by product; indexed by product
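        # Bio and fossil energy come from the linear intensity fits in bfEI (bio: b1*recycled
        # content + b0; fossil: flat fesEI scaled by the recycled-yield multiplier), applied to
        # demand by recycled-content level. P&W and News are instead built from pulp-level
        # intensities (pwpEI) plus the paper-production intensity (paperEI).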
bioEnergy = pd.Series(0, index = cls.fProd, name = "bioEnergy")
fesEnergy = pd.Series(0, index = cls.fProd, name = 'fesEnergy')
totalEnergy = pd.Series(0, index = cls.fProd, name = 'totalEnergy')
for t in cls.fProd:
bioEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
sum([r * cls.bfEI.loc['bioEI b1',t] + cls.bfEI.loc['bioEI b0',t] for r in cls.rLevel[t]]))
fesEnergy[t] = sum(prodLD[t].values[:len(cls.rLevel[t])] *
cls.bfEI.loc['fesEI',t] * multiProd.loc[t,'rYldMultiProd'])
            if 'P&W' in t or 'News' in t:
avgrecPct = sum(prodLD[t].values[:len(cls.rLevel[t])] * cls.rLevel[t]) / prodLD[t].sum()
bioPctPW = avgrecPct * cls.bioPct.loc['bioPct b1',t] + cls.bioPct.loc['bioPct b0',t]
pulpProdEnergy = sum([pbpVol.loc[p,t] * pwpEI.loc[p].values[0] for p in pwpEI.index])
ppEnergy = pulpProdEnergy + prodLD[t].sum() * paperEI.values[0]
bioEnergy[t] = bioPctPW * ppEnergy
fesEnergy[t] = (1 - bioPctPW) * ppEnergy * multiProd.loc[t,'rYldMultiProd']
totalEnergy[t] = bioEnergy[t] + fesEnergy[t]
return pd.concat([bioEnergy, fesEnergy, totalEnergy], axis=1)
def calculateProduction(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# bfCO2 (df) - bio & fes CO2 fitting parameters; indexed by product
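        # Production CO2 scales linearly with the bio/fossil energy split via the bfCO2 b1 slopes.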
bioCO2 = pd.Series(0, index = cls.fProd, name = 'bioCO2')
fesCO2 = pd.Series(0, index = cls.fProd, name = 'fesCO2')
totalCO2 = pd.Series(0, index = cls.fProd, name = 'totalCO2')
for t in cls.fProd:
bioCO2[t] = calcEnergy.loc[t,'bioEnergy'] * cls.bfCO2.loc['bioCO2 b1',t]
fesCO2[t] = calcEnergy.loc[t,'fesEnergy'] * cls.bfCO2.loc['fesCO2 b1',t]
totalCO2[t] = bioCO2[t] + fesCO2[t]
return pd.concat([bioCO2, fesCO2, totalCO2], axis=1)
def calculateFuel(cls,calcEnergy):
# calcEnergy (df) - bio, fes, and total energy from calculateEnergy; indexed by product
# fuelTable (df) - fuel impact by product; indexed by fuel type
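        # Upstream fuel impacts: biogenic fuels (Fuel Type 1) scale with bio energy use, fossil
        # fuels (Fuel Type 2) with fossil energy use; the implied fuel volumes (via FU/GJ) are
        # then also routed through the transport calculation.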
fuels = cls.fuelTable.index
bioFI = pd.Series(0, index = cls.fProd, name = 'bioFuelImp')
fesFI = pd.Series(0, index = cls.fProd, name = 'fesFuelImp')
fuelImp = pd.Series(0, index = cls.fProd, name = 'fuelImp')
for t in cls.fProd:
bioFI[t] = calcEnergy.loc[t,'bioEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1])
fesFI[t] = calcEnergy.loc[t,'fesEnergy'] * sum([cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'Upstream Impact Factor']
for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2])
fuelImp[t] = bioFI[t] + fesFI[t]
fuelTransVol = cls.fuelTable.copy()
fuel1 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 1]
fuel2 = [f for f in fuels if cls.fuelTable.loc[f,'Fuel Type'] == 2]
for t in cls.fProd:
fuelTransVol.loc[fuel1,t] = [calcEnergy.loc[t,'bioEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel1]
fuelTransVol.loc[fuel2,t] = [calcEnergy.loc[t,'fesEnergy'] * cls.fuelTable.loc[f,t] * cls.fuelTable.loc[f,'FU/GJ']
for f in fuel2]
fuelTrans = pd.Series(cls.calculateTrans(fuelTransVol), name = 'fuelTrans')
return pd.concat([bioFI, fesFI, fuelImp, fuelTrans], axis=1)
def calculateResidual(cls,pbpVol,f2pVol):
# pbpVol [df] - pulp by product (in Mg); indexed by pulp name
# f2pVol [df] - recycled fiber to pulp (in Mg); indexed by fiber code
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# rsdlModes [df] - residual treatments modes; indexed by residual type
# rsdlbio [df] - transport and biogenic emissions factors; indexed by residual treatment mode
# rsdlfos [df] - transport and fossil emissions factors; indexed by residual treatment mode
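        # Approach: fiber residue = fiber input * (1 - fiber-to-pulp yield), pulp residue =
        # pulp * (1 - pulp-to-product yield); residues are allocated to products by each
        # product's share of pulp use, split across treatment modes (rsdlModes), and multiplied
        # by the biogenic/fossil emission factors for each mode.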
pulpProd = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'pulpProduced')
fiberRes = pd.Series(0, index = cls.rPulp + cls.vPulp, name = 'fiberResidue')
for p in cls.rPulp: # order of fPulp must match order of r/vPulp
pulpProd[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
fiberRes[p] = sum([(f2pVol.loc[cls.rFiber,t].mul(1 - cls.f2pYld.loc[cls.rFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.rPulp.index(p)])
for q in cls.vPulp:
pulpProd[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
fiberRes[q] = sum([(f2pVol.loc[cls.vFiber,t].mul(1 - cls.f2pYld.loc[cls.vFiber,t])).sum() for t in cls.fProdM
if cls.fProdM.index(t) == cls.vPulp.index(q)])
pulpUP = pbpVol.iloc[:,:-1].div(pulpProd, axis=0).fillna(0) # pulpUsePct
rFiberRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(fiberRes[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rFiberRsd')
rPulpRsd = pd.Series((pulpUP.loc[cls.rPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.rPulp], axis=0)).sum(), index = cls.fProd, name = 'rPulpRsd')
rTotalRsd = rFiberRsd + rPulpRsd
vFiberRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(fiberRes[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vFiberRsd')
vPulpRsd = pd.Series((pulpUP.loc[cls.vPulp].mul(1 - cls.pulpYld.iloc[:,0].loc[cls.vPulp], axis=0)).sum(), index = cls.fProd, name = 'vPulpRsd')
vTotalRsd = vFiberRsd + vPulpRsd
rsdlType = cls.rsdlModes.index
rsdlQuantity = pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rt in rsdlType:
if cls.rsdlModes.loc[rt,'Input Base'] == 1:
rsdlQuantity.loc[rt,:] = rTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
if cls.rsdlModes.loc[rt,'Input Base'] == 2:
rsdlQuantity.loc[rt,:] = vTotalRsd * cls.rsdlModes.loc[rt,'Intensity']
rsdlMode = cls.rsdlModes.columns[:-2]
rsdlModeVol = {rM: pd.DataFrame(0, index = rsdlType, columns = cls.fProd)
for rM in rsdlMode}
for rM in rsdlMode:
rsdlModeVol[rM] = rsdlQuantity.mul(cls.rsdlModes[rM], axis=0)
rsdlModeVol[rM] = rsdlModeVol[rM].assign(TransCode=cls.rsdlbio.loc[rM,'TransCode'] * np.ones(len(rsdlType)))
rsdlModeVol[rM].replace([np.inf, -np.inf], np.nan, inplace=True) # TODO: what happens to make this inf?
            rsdlModeVol[rM] = rsdlModeVol[rM].fillna(0)
bioImp = pd.Series(0, index = cls.fProd, name = 'bioImp')
fosImp = pd.Series(0, index = cls.fProd, name = 'fossilImp')
for t in cls.fProd:
bioImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlbio.loc[rM,t] for rM in rsdlMode])
fosImp[t] = sum([rsdlModeVol[rM][t].sum() * cls.rsdlfos.loc[rM,t] for rM in rsdlMode])
biofosImp = pd.Series(bioImp + fosImp, name = 'bio+fos')
rsdlTrans = pd.Series(0, index = cls.fProd, name = 'rsdlTrans')
for rM in rsdlMode:
rsdlTrans += cls.calculateTrans(rsdlModeVol[rM])
return pd.concat([bioImp, fosImp, biofosImp, rsdlTrans], axis=1)
def getExportTrans(cls,transVol):
transImpact = pd.Series(0, index = transVol.columns[:-1])
tC = transVol['TransCode']
        tC = tC[(tC != 0) & (tC != 1)] # keep only rows with an applicable transport code (codes 0 and 1 are skipped)
transVol = transVol.loc[tC.index]
for n in transVol.columns[:-1]:
for m in cls.transUMI.columns:
transImpact[n] += sum(transVol[n] * cls.transPct.loc[tC,m].values * cls.transKM.loc[tC,m].values * cls.transUMI[m].values)
return transImpact.values
def calculateExport(cls,exportOld,exportNew):
# exportOld [df] old export from US; indexed by rec fiber
# exportNew [df] new export from US; indexed by rec fiber
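        # The change in US exports per recovered-fiber group is scaled by the relative pulping
        # yields in the US vs. China (fYield); beta expresses the net change as a fraction of
        # China's total fiber furnish and is applied to China's production, energy intensity,
        # and emission factor, plus the change in trans-ocean transport.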
impChange = pd.Series(0, index = cls.fYield.index, name = 'impChangeByGroup')
sumChange = pd.Series(0, index = cls.fYield.index, name = 'sumNetChange')
for r in impChange.index:
typeMask = cls.fiberType[cls.fiberType['fiberType'] == r].index
# impChange[r] = (exportOld.loc[typeMask, 'exportOld'] - exportNew.loc[typeMask, 'exportNew']).sum()
impChange[r] = (exportNew.loc[typeMask, 'exportNew'] - exportOld.loc[typeMask, 'exportOld']).sum()
sumChange[r] = impChange[r] * (1 - cls.fYield.loc[r,'US'] / cls.fYield.loc[r,'China'])
beta = sumChange.sum() / (cls.chinaCons.loc['totalVir'].values + cls.chinaCons.loc['domesticRec'].values +
cls.chinaCons.loc['importRec-US'].values + cls.chinaCons.loc['importRec-nonUS'].values)
# chinaTrans = cls.getExportTrans(exportOld) - cls.getExportTrans(exportNew)
chinaTrans = cls.getExportTrans(exportNew) - cls.getExportTrans(exportOld)
return cls.chinaVals.loc['Production'] * cls.chinaVals.loc['Energy Intensity'] * cls.chinaVals.loc['Emission Factor'] * beta + chinaTrans
def getForestVirginGHG(cls,virCons,woodint,slope,intercept):
# virCons [df] change in virgin consumption; products as columns
# woodint [df] intervals of virgin wood consumption
# slope [s] b1 value for GHG emissions
# intercept[s] b0 value for GHG emissions
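        # Piecewise-linear lookup: find the wood-consumption interval [woodint[n], woodint[n+1])
        # containing virCons and apply that interval's slope (b1) and intercept (b0).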
for n in range(1,len(woodint.columns)):
if (woodint[n].values <= virCons) & (virCons < woodint[n+1].values):
return virCons * slope[n] + intercept[n]
return 0 # catch values outside of interval
def calculateForest(cls,virCons,forYear):
# virCons [float] change in virgin consumption, sum of all products
# forYear [int] forest year length for cumulative emissions calcs; 10-90 by ten
deltaTotalGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wtotalGHGb1[forYear], cls.wtotalGHGb0[forYear]),
name = 'totalGHG') * 1e6
deltabioGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wbioGHGb1[forYear], cls.wbioGHGb0[forYear]),
name = 'bioGHG') * 1e6
deltafosGHG = pd.Series(cls.getForestVirginGHG(virCons / 1e6, cls.woodint, cls.wfosGHGb1[forYear], cls.wfosGHGb0[forYear]),
name = 'fosGHG') * 1e6
return pd.concat([deltaTotalGHG, deltabioGHG, deltafosGHG], axis=1)
def calculateEmissions(cls):
# xls [df] - name of Excel spreadsheet to pull data from
# fProd [df] - list of products in current scenario
# rL [dict] - recycled content level by product
# f2pYld [df] - fiber to pulp yield by pulp product; indexed by fiber
# pulpYld [df] - pulp to product yield; indexed by pulp
# f2pVolNew [df] - fiber to pulp volume (in Mg); indexed by fiber code
# pbpVolNew [df] - pulp by product volume; indexed by pulp name
# consCollNew [df] - domestic consumption, collection, and recovery by product
pulpNames = cls.rPulp + cls.vPulp
mvO = [cls.pbpVolOld.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolOld = pd.concat([mvO[0],mvO[1]], axis=1).T
mvN = [cls.pbpVolNew.loc[p] for p in pulpNames if 'Deinked' in p or 'Market' in p]
marketVolNew = pd.concat([mvN[0],mvN[1]], axis=1).T
# Chemical
chemImp = cls.calculateChem(cls.chemicals, cls.prodDemand)
# EoL
oldEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollOld)
newEoL = cls.calculateEoL(cls.eolEmissions, cls.consCollNew)
# Energy
oldPulpPct = cls.getEnergyPulpPct(cls.pbpVolOld)
newPulpPct = cls.getEnergyPulpPct(cls.pbpVolNew)
oldPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolOld, cls.pbpVolOld)
newPYCoeff = cls.getEnergyYldCoeff(cls.f2pVolNew, cls.pbpVolNew)
oldYldMultiplier = (oldPYCoeff / oldPYCoeff).fillna(0)
newYldMultiplier = (newPYCoeff / oldPYCoeff).fillna(0)
oldMP = cls.getEnergyMultiProd(oldYldMultiplier, oldPulpPct)
newMP = cls.getEnergyMultiProd(newYldMultiplier, newPulpPct)
oldEnergy = cls.calculateEnergy(cls.pbpVolOld, cls.prodLD, oldMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
newEnergy = cls.calculateEnergy(cls.pbpVolNew, cls.demandNew, newMP, cls.pwpEI.iloc[:-1], cls.pwpEI.iloc[-1])
# Production
oldProd = cls.calculateProduction(oldEnergy)
newProd = cls.calculateProduction(newEnergy)
# Fuel
oldFuel = cls.calculateFuel(oldEnergy)
newFuel = cls.calculateFuel(newEnergy)
# Residual
oldRsdl = cls.calculateResidual(cls.pbpVolOld, cls.f2pVolOld)
newRsdl = cls.calculateResidual(cls.pbpVolNew, cls.f2pVolNew)
# Transportation
oldFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolOld), name = 'fiberTrans')
oldMarketTrans = pd.Series(cls.calculateTrans(marketVolOld), name = 'marketTrans')
oldTrans = pd.concat([oldFiberTrans, oldMarketTrans, chemImp['chemTrans'], oldFuel['fuelTrans'],
oldRsdl['rsdlTrans'], oldEoL['eolTrans']], axis=1)
newFiberTrans = pd.Series(cls.calculateTrans(cls.f2pVolNew), name = 'fiberTrans')
newMarketTrans = pd.Series(cls.calculateTrans(marketVolNew), name = 'marketTrans')
newTrans = pd.concat([newFiberTrans, newMarketTrans, chemImp['chemTrans'], newFuel['fuelTrans'],
newRsdl['rsdlTrans'], newEoL['eolTrans']], axis=1)
# Export
exportImp = cls.calculateExport(cls.exportOld,cls.exportNew)
# FASOM/LURA
forestGHG = cls.calculateForest(cls.f2pVolNew.iloc[:,:-1].loc[cls.vFiber].sum().sum() -
cls.f2pVolOld.iloc[:,:-1].loc[cls.vFiber].sum().sum(), 90)
# Summary calcs for plotting
oldSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(oldFuel['bioFuelImp'], name='fuelbio'),
pd.Series(oldFuel['fesFuelImp'], name='fuelfos'),
pd.Series(oldProd['totalCO2'], name='prodImp'),
pd.Series(oldProd['bioCO2'], name='prodbio'),
pd.Series(oldProd['fesCO2'], name='prodfos'),
pd.Series(oldEnergy['totalEnergy'], name='energy'),
pd.Series(oldEnergy['bioEnergy'], name='energybio'),
pd.Series(oldEnergy['fesEnergy'], name='energyfos'),
pd.Series(oldRsdl['bio+fos'], name='residImp'),
pd.Series(oldRsdl['bioImp'], name='residbio'),
pd.Series(oldRsdl['fossilImp'], name='residfos'),
pd.Series(oldEoL['bftEoL'], name='eolImp'),
pd.Series(oldEoL['bioEoL'], name='eolbio'),
pd.Series(oldEoL['fesTransEoL'], name='eolfos'),
pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'] + oldEoL['bioEoL'], name='bioCO2'),
pd.Series(oldTrans.sum(axis=1) + chemImp['chemImp'] + oldFuel['fuelImp'] +
oldProd['fesCO2'] + oldRsdl['fossilImp'] + oldEoL['fesTransEoL'], name='fossilCO2'),
pd.Series(oldProd['bioCO2'] + oldRsdl['bioImp'], name='g2gbio'),
pd.Series(oldProd['fesCO2'] + oldRsdl['fossilImp'] + oldTrans.sum(axis=1), name='g2gfos')], axis=1)
oldSums = pd.concat([oldSums, pd.Series(oldSums['bioCO2'] + oldSums['fossilCO2'], name='totalImp')], axis=1)
oldSums = pd.concat([oldSums, pd.Series(oldSums['totalImp'] / cls.prodLD.sum(), name='unitImp')], axis=1, sort=True)
newSums = pd.concat([pd.Series(chemImp['chemImp'], name='chemImp'),
pd.Series(newFuel['bioFuelImp'], name='fuelbio'),
pd.Series(newFuel['fesFuelImp'], name='fuelfos'),
| pd.Series(newProd['totalCO2'], name='prodImp') | pandas.Series |
import re
import pandas
import cobra
from fractions import Fraction
def ReadExcel(excel_file, parse="cobra_string", Print=False):
""" parse = "cobra_string" | "cobra_position"
cobra_string
% INPUT
% fileName xls spreadsheet, with one 'Reaction List' and one 'Metabolite List' tab
%
% 'Reaction List' tab: Required headers (case sensitive):
% 'Abbreviation' HEX1
% 'Description' Hexokinase
% 'Reaction' 1 atp[c] + 1 glc-D[c] --> 1 adp[c] + 1 g6p[c] + 1 h[c]
% 'GPR' (3098.3) or (80201.1) or (2645.3) or ...
% 'Genes' 2645.1,2645.2,2645.3,... (optional)
% 'Proteins' Flj22761.1, Hk1.3, Gck.2,... (optional)
% 'Subsystem' Glycolysis
% 'Reversible' 0 (false) or 1 (true)
% 'Lower bound' 0
% 'Upper bound' 1000
% 'Objective' 0 (optional)
% 'Confidence Score' 0,1,2,3,4
% 'EC Number' 2.7.1.1,2.7.1.2
% 'Notes' 'Reaction also associated with EC 2.7.1.2' (optional)
% 'References' PMID:2043117,PMID:7150652,... (optional)
%
% 'Metabolite List' tab: Required headers (case sensitive): (needs to be complete list of metabolites, i.e., if a metabolite appears in multiple compartments it has to be represented in multiple rows. Abbreviations need to overlap with use in Reaction List
% 'Abbreviation' glc-D or glc-D[c]
% 'Description' D-glucose
% 'Neutral formula' C6H12O6
% 'Charged formula' C6H12O6
% 'Charge' 0
% 'Compartment' cytosol
% 'KEGG ID' C00031
% 'PubChem ID' 5793
% 'ChEBI ID' 4167
% 'InChI string' InChI=1/C6H12O6/c7-1-2-3(8)4(9)5(10)6(11)12-2/h2-11H,1H2/t2-,3-,4+,5-,6?/m1/s1
% 'SMILES' OC[C@H]1OC(O)[C@H](O)[C@@H](O)[C@@H]1O
% 'HMDB ID' HMDB00122
%
% OPTIONAL INPUT (may be required for input on unix macines)
% biomassRxnEquation .xls may have a 255 character limit on each cell,
% so pass the biomass reaction separately if it hits this maximum.
%
% OUTPUT
% model COBRA Toolbox model
cobra_position
% INPUT
% excel_file xls spreadsheet, with one 'reactions' and one 'metabolites' tab
%
% 'reactions' tab: Required headers:
% col 0 Abbreviation HEX1
% col 1 Description Hexokinase
% col 2 Reaction 1 atp[c] + 1 glc-D[c] --> 1 adp[c] + 1 g6p[c] + 1 h[c]
% col 3 GPR b0001
% col 4 Genes b0001 (optional: column can be empty)
% col 5 Proteins AlaS (optional: column can be empty)
% col 6 Subsystem Glycolysis
% col 7 Reversible 0
% col 8 Lower bound 0
% col 9 Upper bound 1000
% col 10 Objective 0 (optional: column can be empty)
% col 11 Confidence Score 0,1,2,3,4
% col 12 EC. Number 1.1.1.1
% col 13 Notes N/A (optional: column can be empty)
% col 14 References PMID: 1111111 (optional: column can be empty)
%
% 'metabolites' tab: Required headers: needs to be complete list of metabolites, i.e., if a metabolite appears in multiple compartments it has to be represented in multiple rows. Abbreviations needs to overlap with use in Reaction List
% col 0 Abbreviation
% col 1 Description
% col 2 Neutral formula
% col 3 Charge formula
% col 4 Charge
% col 5 Compartment
% col 6 KEGG ID
% col 7 PubChem ID
% col 8 ChEBI ID
% col 9 InChI string
% col 10 SMILES
% col 11 HMDB ID
%
%
% OUTPUT
% model cobrapy model """
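    # Usage sketch (hypothetical workbook name; both supported parse modes shown):
    #   model = ReadExcel("model.xls", parse="cobra_string")    # header-name based tabs
    #   model = ReadExcel("model.xls", parse="cobra_position")  # column-position based tabs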
excel = pandas.ExcelFile(excel_file)
for sheet in excel.sheet_names:
if sheet == "Reaction List" and parse == "cobra_string":
reactions = excel.parse(sheet,index_col=None)
elif 'reaction' in sheet.lower():
reactions = excel.parse(sheet,index_col=None)
if sheet == "Metabolite List" and parse == "cobra_string":
metabolites = excel.parse(sheet,index_col=None)
elif 'metabolite' in sheet.lower():
metabolites = excel.parse(sheet,index_col=None)
cobra_reaction_position = ['Abbreviation','Description','Reaction','GPR','Genes','Proteins','Subsystem','Reversible','Lower bound','Upper bound','Objective','Confidence Score','EC Number','Notes','References']
cobra_metabolite_position = ['Abbreviation','Description','Neutral formula','Charged formula','Charge','Compartment','KEGG ID','PubChem ID','ChEBI ID','InChI string','SMILES','HMDB ID']
if parse == "cobra_position":
if len(reactions.columns) > 15:
reactions = reactions.iloc[:,:15]
reactions.columns = cobra_reaction_position
else:
reactions.columns = cobra_reaction_position[:len(reactions.columns)]
if len(metabolites.columns) > 12:
metabolites = metabolites.iloc[:,:12]
metabolites.columns = cobra_metabolite_position
else:
metabolites.columns = cobra_metabolite_position[:len(metabolites.columns)]
model = cobra.Model()
metabolite_dic = {}
element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)")
for met in metabolites.index:
met_row = metabolites.loc[met] # pandas.Series of the metabolites
met_id = str(met_row['Abbreviation'])
#met_name = str(met_row[1]) if pandas.notnull(met_row[1]) else None
met_name = str(met_row['Description']) if ('Description' in met_row.index) and pandas.notnull(met_row['Description']) else None
if ('Charged formula' in met_row.index) and pandas.notnull(met_row['Charged formula']):
met_formula = str(met_row['Charged formula'])
elif ('Neutral formula' in met_row.index) and pandas.notnull(met_row['Neutral formula']):
if ('Charge' in met_row.index) and pandas.notnull(met_row['Charge']):
met_formula = ''
tmp_formula = str(met_row['Neutral formula'])
tmp_formula = tmp_formula.replace("*", "")
parsed = element_re.findall(tmp_formula)
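                    # Build the charged formula from the neutral one: elements are copied
                    # as-is, except hydrogen, whose count is shifted by the charge
                    # (e.g. a neutral C6H12O6 with charge -1 becomes C6H11O6).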
for (element, count) in parsed:
if element != "H":
met_formula += element + str(count)
else:
if count == '':
count = 1
count = float(count)
if count.is_integer():
count = int(count)
charge = float(met_row['Charge'])
if charge.is_integer():
charge = int(charge)
count += charge
if count == 1:
met_formula += element
elif count != 0:
met_formula += element + str(count)
else:
met_formula = None
else:
met_formula = None
met_compartment = str(met_row['Compartment']) if 'Compartment' in met_row.index and pandas.notnull(met_row['Compartment']) else None
metabolite = cobra.Metabolite(met_id, formula=met_formula, name=met_name, compartment=met_compartment)
if ('Charge' in met_row.index) and pandas.notnull(met_row['Charge']):
metabolite.charge = float(met_row['Charge'])
if metabolite.charge.is_integer():
metabolite.charge = int(metabolite.charge)
if ('KEGG ID' in met_row.index) and pandas.notnull(met_row['KEGG ID']): metabolite.kegg_id = met_row['KEGG ID']
if 'PubChem ID' in met_row.index and pandas.notnull(met_row['PubChem ID']): metabolite.pubchem_id = str(met_row['PubChem ID'])
if 'ChEBI ID' in met_row.index and pandas.notnull(met_row['ChEBI ID']): metabolite.chebi_id = str(met_row['ChEBI ID'])
if 'InChI string' in met_row.index and pandas.notnull(met_row['InChI string']): metabolite.inchi_id = str(met_row['InChI string'])
if 'SMILES' in met_row.index and pandas.notnull(met_row['SMILES']): metabolite.smiles = str(met_row['SMILES'])
if 'HMDB ID' in met_row.index and pandas.notnull(met_row['HMDB ID']): metabolite.hmdb_id = str(met_row['HMDB ID'])
metabolite_dic[met_id] = metabolite
if Print:
print(metabolite.id)
for reac in reactions.index:
reac_row = reactions.loc[reac]
reaction = cobra.Reaction(str(reac_row['Abbreviation']))
stoichiometry_list = []
metabolite_list = []
new_metabolite = True
product = False
compartment = ''
reaction_string = reac_row['Reaction'].split(' ')
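        # Token walk over the space-separated reaction string: a leading "[c]:" token
        # sets the default compartment, "+" separates species, arrow tokens fix the
        # directionality and switch from reactants to products, numeric tokens
        # (optionally parenthesised) are stoichiometric coefficients, and anything
        # else is treated as a metabolite id; reactant coefficients are negated.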
for s in reaction_string:
if s.startswith('['):
if s.endswith(']:'):
compartment = s[:-1]
else:
compartment = s
elif s == ":" or s == "":
pass
elif s == "+":
new_metabolite = True
elif s == "->" or s == "-->" or s == "=>" or s == u'\u2192':
direction = 'LEFT-TO-RIGHT'
product = True
new_metabolite = True
elif s == "<>" or s =="<=>" or s == "<==>" or s == "<->" or s == u'\u2194':
direction = 'REVERSIBLE'
product = True
new_metabolite = True
elif s == "<-" or s == "<--" or s == "<=" or s == u'\u2190':
direction = 'RIGHT-TO-LEFT'
product = True
new_metabolite = True
else:
try:
if s[0] == "(" and s[-1] == ")":
s = s[1:-1]
s = Fraction(s)
s = float(s)
if not product:
s = -s
stoichiometry_list.append(s)
new_metabolite = False
except ValueError:
metabolite_list.append(s + compartment)
if new_metabolite:
if not product:
stoichiometry_list.append(-1)
else:
stoichiometry_list.append(1)
new_metabolite = True
stoi_dic = {}
for b in range(len(metabolite_list)):
met_id = str(metabolite_list[b])
try:
met = metabolite_dic[met_id]
except KeyError: # try case-insensitive
met_key = []
for key in metabolite_dic:
if key.lower() == met_id.lower():
met_key.append(key)
if len(met_key) == 1:
met = metabolite_dic[met_key[0]]
else:
print(met_id + " added to metabolite list in " + str(reac_row['Abbreviation']) + " in row " + str(reac+2))
additional_metabolite = cobra.Metabolite(met_id)
met = metabolite_dic[met_id] = additional_metabolite
stoi_dic[met] = stoichiometry_list[b]
reaction.add_metabolites(stoi_dic)
#
reaction.lower_bound = float('-inf')
reaction.upper_bound = float('inf')
if direction == 'LEFT-TO-RIGHT':
reaction.lower_bound = 0.0
elif direction == 'RIGHT-TO-LEFT':
reaction.upper_bound = 0.0
if ('Description' in reac_row.index) and pandas.notnull(reac_row['Description']): reaction.name = str(reac_row['Description'])
if ('GPR' in reac_row.index) and pandas.notnull(reac_row['GPR']): reaction.gene_reaction_rule = str(reac_row['GPR'])
if ('Proteins' in reac_row.index) and pandas.notnull(reac_row['Proteins']): reaction.proteins = str(reac_row['Proteins'])
if ('Subsystem' in reac_row.index) and pandas.notnull(reac_row['Subsystem']): reaction.subsystem = str(reac_row['Subsystem'])
if ('Lower bound' in reac_row.index) and | pandas.notnull(reac_row['Lower bound']) | pandas.notnull |
from os import path, mkdir
import feedparser
import pandas as pd
import datetime
filename = "last.txt"
date = datetime.datetime.now().strftime("%Y-%m-%d")
project_url = "https://github.com/bwilliams18/risky-or-not"
def format_perc(fl):
return f"{int(round(fl * 100,0))}%"
if __name__ == "__main__":
RiskyOrNot = feedparser.parse("https://riskyornot.libsyn.com/rss")
if path.exists(filename):
with open(filename, "r") as f:
last_analyzed = f.read()
else:
last_analyzed = None
episodes = list()
for entry in RiskyOrNot.entries:
r = entry["summary"].find("☣️")
nr = entry["summary"].find("👍🏼")
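        # The summary appears to encode the hosts' verdicts by emoji: ☣️ = "risky",
        # 👍🏼 = "not risky". Whichever emoji appears first is read as Dr. Don's call
        # and the other as Prof. Ben's; if only one emoji is present, both hosts
        # share that verdict (inferred from the branches below).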
if r == -1 and nr == -1:
continue
elif r == -1:
don = "👍🏼"
ben = "👍🏼"
elif nr == -1:
don = "☣️"
ben = "☣️"
elif nr < r:
don = "👍🏼"
ben = "☣️"
elif r < nr:
ben = "👍🏼"
don = "☣️"
else:
continue
episodes.append(
{
"title": entry["title"],
"id": entry["id"],
"link": entry["link"],
"dr. don": don,
"prof. ben": ben,
}
)
df = | pd.DataFrame(episodes) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# This is a test file intended to be used with pytest
# pytest automatically runs all the function starting with "test_"
# see https://docs.pytest.org for more information
import os
import pytest
import pandas as pd
from nlp.spacy_tokenizer import MultilingualTokenizer
def test_tokenize_df_english():
input_df = pd.DataFrame({"input_text": ["I hope nothing. I fear nothing. I am free. 💩 😂 #OMG"]})
tokenizer = MultilingualTokenizer()
output_df = tokenizer.tokenize_df(df=input_df, text_column="input_text", language="en")
tokenized_document = output_df[tokenizer.tokenized_column][0]
assert len(tokenized_document) == 15
def test_tokenize_df_japanese():
input_df = pd.DataFrame({"input_text": ["期一会。 異体同心。 そうです。"]})
tokenizer = MultilingualTokenizer()
output_df = tokenizer.tokenize_df(df=input_df, text_column="input_text", language="ja")
tokenized_document = output_df[tokenizer.tokenized_column][0]
assert len(tokenized_document) == 9
def test_tokenize_df_multilingual():
input_df = pd.DataFrame(
{
"input_text": [
"I hope nothing. I fear nothing. I am free.",
" Les sanglots longs des violons d'automne",
"子曰:“學而不思則罔,思而不學則殆。”",
"期一会。 異体同心。 そうです。",
],
"language": ["en", "fr", "zh", "ja"],
}
)
tokenizer = MultilingualTokenizer()
output_df = tokenizer.tokenize_df(df=input_df, text_column="input_text", language_column="language")
tokenized_documents = output_df[tokenizer.tokenized_column]
tokenized_documents_length = [len(doc) for doc in tokenized_documents]
assert tokenized_documents_length == [12, 8, 19, 9]
def test_tokenize_df_long_text():
input_df = pd.DataFrame({"input_text": ["Long text"]})
tokenizer = MultilingualTokenizer(max_num_characters=1)
with pytest.raises(ValueError):
tokenizer.tokenize_df(df=input_df, text_column="input_text", language="en")
def test_create_spacy_tokenizer_no_model():
input_df = pd.DataFrame({"input_text": ["I hope nothing. I fear nothing. I am free. 💩 😂 #OMG"]})
tokenizer = MultilingualTokenizer(add_pipe_components=["sentencizer"],enable_pipe_components=["sentencizer"],use_models=False)
nlp = tokenizer._create_spacy_tokenizer("en")
assert nlp.pipe_names == ["sentencizer"]
def test_create_spacy_tokenizer_model():
input_df = | pd.DataFrame({"input_text": ["I hope nothing. I fear nothing. I am free. 💩 😂 #OMG"]}) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = | Series([1, 2], index=['one', 'one']) | pandas.Series |
from numpy import NaN
import pandas as pd
from tqdm import tqdm
# This whole part is only here to make the object-oriented approach work
# It is not required to reach the result
# The easiest way would be to use Jupyter, without object orientation
# I did it this way in order to learn
class MicrodadosENEM:
def __init__(self, nome=None, colunas=None, ler_microdados=True, microdados=None):
if ler_microdados:
print("Importando...")
self.microdados = | pd.read_csv(nome, sep=';', encoding='latin-1', usecols=colunas) | pandas.read_csv |
import streamlit as st
import numpy as np
import pandas as pd
import sqlite3
conn=sqlite3.connect('data.db')
c=conn.cursor()
import os
import warnings
warnings.filterwarnings('ignore')
import tensorflow.keras as tf
import joblib
import base64
from io import BytesIO
import bz2
import pickle
import _pickle as cPickle
from streamlit import caching
# Pickle a file and then compress it into a file with extension
def compressed_pickle(title, data):
with bz2.BZ2File(title + '.pbz2','w') as f:
cPickle.dump(data, f)
# Load any compressed pickle file
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
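# Usage sketch (hypothetical file names):
#   compressed_pickle("ratings", ratings_df)        # writes ratings.pbz2
#   ratings_df = decompress_pickle("ratings.pbz2")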
ratings_data = decompress_pickle('rat1.pbz2')
ratings_df1=ratings_data.sort_values(by="user_id",ascending=True).reset_index(drop=True)
ratings_df=ratings_df1[ratings_df1["user_id"]<2501].reset_index(drop=True)
del ratings_data,ratings_df1
new_model=tf.models.load_model("modelrecsys.h5")
co=joblib.load("contentsfile.joblib")
titlefile=joblib.load('title.joblib')
#### To download dataframe recommendations
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
#Generates a link allowing the data in a given panda dataframe to be downloaded
#in: dataframe
#out: href string
val = to_excel(df)
b64 = base64.b64encode(val) # val looks like b'...'
    return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="extract.xlsx">Download xlsx file</a>' # decode b'abc' => abc
##df = ... # your dataframe
##st.markdown(get_table_download_link(df), unsafe_allow_html=True)
def create_usertable():
c.execute('CREATE TABLE IF NOT EXISTS userstable(username TEXT, password TEXT)')
def add_userdata(username,password):
c.execute('INSERT INTO userstable(username, password) VALUES(?,?)',(username,password))
conn.commit()
def login_user(username,password):
c.execute('SELECT * FROM userstable WHERE username=? AND password=?',(username,password))
data=c.fetchall()
return data
def view_all_users():
c.execute('SELECT * FROM userstable')
data=c.fetchall()
return data
st.title(" GOODREADS BOOKS ")
st.markdown("###***HYBRID BOOK RECOMMENDATION SYSTEM USING DEEP LEARNING***")
menu=["Home", "Sign up", "Login","Books"]
choice=st.sidebar.selectbox("Menu",menu)
if choice=="Home":
st.image("https://images.gr-assets.com/misc/1397605627-1397605627_goodreads_misc.png",width=850)
st.markdown("Please use sidebar dropdown benu for ***Login/Signup***. (Login after Signing up entering custom username and password and tick login)")
st.markdown("_you are in HOME view_")
elif choice=="Login":
st.subheader("Login Section")
username=st.sidebar.text_input("username")
password=st.sidebar.text_input("password",type='password')
if st.sidebar.checkbox("Login"):
# if password=="<PASSWORD>":
create_usertable()
result=login_user(username,password)
if result:
st.success("LOGGED IN SUCCESSFULLY AS {} ".format(username))
            st.markdown("After logging in, please select one of the Task options below - **_Info_**, **_Start-Analytics_** (for Recommendations), **_Account Details_**")
task=st.selectbox("Task",["Info","Start-Analytics","Account Details"])
if task=="Info":
                st.subheader("Use Start-Analytics for finding Recommendations")
st.image("http://knowledgequest.aasl.org/wp-content/uploads/2019/05/GoodReads-logo.jpg",width=500)
st.markdown("""**What is Goodreads?**
Goodreads is the largest site for connecting readers with books they will love. Keep track of books you have read, want to read, and are currently reading. Connect with other readers, leave reviews, and get book recommendations. You can even follow your favorite authors! (Follow me on goodreads)
Goodreads has been around since 2006, but it gained momentum in 2013 when it was bought by Amazon. Now you can integrate goodreads with your kindle and read book samples from Amazon straight from the goodreads site!
When most people think of goodreads, they think of book reviews and book recommendations. These are two of the most important features of goodreads, but there is so much more you can get from the website and app. Read on to learn how to use goodreads and take advantage of the lists, groups, giveaways, quotes, and so much more.""")
elif task=="Start-Analytics":
                st.subheader("Top N Book Recommendations predicted in real time")
                st.markdown("Please press the Enter key after entering values to apply them")
#user_id = st.number_input('user_id', min_value=1, max_value=53424, value=1)
user_id=st.text_input("Enter user_id {1-2500} default 1")
if user_id!="":
user_id=int(user_id)
if user_id<1 or user_id>2500:
user_id=1
else:
user_id=1
                num=st.text_input("Enter required_recommendation_count (2-30) default 2")
if num!="":
num=int(num)
if num<2 or num>30:
num=2
else:
num=2
us_id_temp=[user_id for i in range(len(co['book_id']))]
@st.cache(suppress_st_warning=True)
def pred(new_model,us_id_temp,co,ratings_df,user_id,titlefile):
reccom = new_model.predict([pd.Series(us_id_temp),co['book_id'],co.iloc[:,1:]])
recc_df= | pd.DataFrame(reccom,columns=["rating"]) | pandas.DataFrame |
"""
Helper functions to convert the data to the format expected by run_robot.py
"""
import sys
import seir
import pandas as pd
import numpy as np
import numpy.linalg as la
import os.path as path
# To use PyJulia
print('Loading PyJulia module...')
from julia.api import Julia
jl = Julia(compiled_modules=False)
from julia import Main as Julia
Julia.eval('ENV["OMP_NUM_THREADS"] = 8')
print('Loading PyJulia module... Ok!')
print('Loading Robot-dance Julia module...')
Julia.eval('include("robot_dance.jl")')
print('Loading Robot-dance Julia module... Ok!')
def save_basic_parameters(tinc=5.2, tinf=2.9, rep=2.5, ndays=400, window=14, min_level=1.0):
"""Save the basic_paramters.csv file using the data used in the report.
All values are optional. If not present the values used in the report wihtout
an initial hammer phase are used.
"""
    basic_prm = pd.Series(dtype=float)
basic_prm["tinc"] = tinc
basic_prm["tinf"] = tinf
basic_prm["rep"] = rep
basic_prm["ndays"] = ndays
basic_prm["window"] = window
basic_prm["min_level"] = min_level
basic_prm.to_csv(path.join("data", "basic_parameters.csv"), header=False)
return basic_prm
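# Illustrative call of the helper above (assumes a local "data" directory
# exists; the argument values are arbitrary). The returned Series mirrors what
# was written to basic_parameters.csv.
def _example_basic_parameters():
    basic = save_basic_parameters(ndays=200, window=7)
    return basic["rep"]  # 2.5 unless overridden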
def initial_conditions(basic_prm, city_data, min_days, Julia, correction=1.0):
"""Fits data and define initial contidions of the SEIR model.
"""
population = city_data["estimated_population_2019"].iloc[0]
confirmed = city_data["confirmed"]
# Compute the new cases from the confirmed sum
new_cases = confirmed.values[1:] - confirmed.values[:-1]
# Use a mean in a week to smooth the data (specially to deal with weekends)
observed_I = np.convolve(new_cases, np.ones(7, dtype=int), 'valid') / 7.0
# Now accumulate in the inf_window
inf_window = int(round(basic_prm["tinf"]))
observed_I = np.convolve(observed_I, np.ones(inf_window, dtype=int), 'valid')
ndays = len(observed_I)
if ndays >= min_days and sum(observed_I) > 0:
observed_I /= population
Julia.observed_I = correction*observed_I
Julia.tinc = basic_prm["tinc"]
Julia.tinf = basic_prm["tinf"]
Julia.rep = basic_prm["rep"]
Julia.eval('initialc = fit_initial(tinc, tinf, rep, observed_I)')
S1 = Julia.initialc[0]
E1 = Julia.initialc[1]
I1 = Julia.initialc[2]
R1 = Julia.initialc[3]
return (S1, E1, I1, R1, ndays), observed_I, population
else:
raise ValueError("Not enough data for %s only %d days available" %
(city_data["city"].iloc[0], len(observed_I)))
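# Worked sketch of the smoothing used in initial_conditions (the numbers are
# made up): a cumulative confirmed series becomes daily new cases, which are
# then averaged over a 7-day window with np.convolve.
def _smoothing_example():
    confirmed = np.array([0, 1, 3, 6, 10, 15, 21, 28, 36])
    new_cases = confirmed[1:] - confirmed[:-1]               # 1, 2, ..., 8
    smoothed = np.convolve(new_cases, np.ones(7), 'valid') / 7.0
    return smoothed                                          # array([4., 5.])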
def simulate(parameters, city_data, min_days):
"""Simulate from the computed initial parameters until the last day.
"""
c = city_data["city"].iloc[0]
last_day = city_data["date"].iloc[-1]
S1, E1, I1, R1, ndays = parameters[c]
covid = seir.seir(ndays)
print("Simulating", c, "until", last_day)
result = covid.run((S1, E1, I1, R1))
return result[:, -1], last_day
def compute_initial_condition_evolve_and_save(basic_prm, state, large_cities, min_pop, correction,
raw_name="data/covid_with_cities.csv"):
"""Compute the initial conditions and population and save it to data/cities_data.csv.
The population andinitial condition is estimated from a file with the information on
the total number of confimed cases for the cities. See the example in
data/covid_with_cities.csv.
Parameters: large_cities: list with the name of cities tha are pre_selected.
basic_prm: basic paramters for SEIR model.
state: state to subselect or None.
large_cinties: minimal subset of cities do be selected.
min_pop: minimal population to select more cities.
correction: a constant to multiply the observed cases to try to correct
subnotification.
raw_name: name of the file with the accumulated infected data to estimate the
initial conditions.
"""
raw_epi_data = pd.read_csv(raw_name)
if state is not None:
raw_epi_data = raw_epi_data[raw_epi_data["state"] == state]
large_cities.extend(
raw_epi_data[raw_epi_data["estimated_population_2019"] > min_pop]["city"].unique()
)
large_cities = list(set(large_cities))
large_cities.sort()
# Create a new Dataframe with only the needed information
raw_epi_data = raw_epi_data[["city", "date", "confirmed", "estimated_population_2019"]]
epi_data = raw_epi_data[raw_epi_data["city"] == large_cities[0]].copy()
epi_data.sort_values(by=["date"], inplace=True)
for city_name in large_cities[1:]:
city = raw_epi_data[raw_epi_data["city"] == city_name].copy()
city.sort_values(by = ["date"], inplace=True)
epi_data = epi_data.append(city)
epi_data.reset_index(inplace=True, drop=True)
# Compute initial parameters fitting the data
min_days = 5
parameters = {}
ignored = []
population = []
n_cities = len(large_cities)
for i in range(n_cities):
city_name = large_cities[i]
print("%d/%d" %(i + 1, n_cities), city_name)
try:
city_data = epi_data[epi_data["city"] == city_name]
parameters[city_name], observed_I, city_pop = initial_conditions(basic_prm,
city_data, min_days, Julia, correction)
population.append(city_pop)
except ValueError:
print("Ignoring ", city_name, "not enough data.")
ignored.append(city_name)
# Simulate the data until the last day to start the optimization phase.
cities_data = {}
for city_name in large_cities:
if city_name in ignored:
continue
city_data = epi_data[epi_data["city"] == city_name]
cities_data[city_name], last_day = simulate(parameters, city_data, min_days)
# Save results
cities_data = pd.DataFrame.from_dict(cities_data,
orient="index", columns=["S1", "E1", "I1", "R1"])
cities_data["population"] = population
cities_data.to_csv(path.join("data", "cities_data.csv"))
return cities_data
def convert_mobility_matrix_and_save(cities_data, max_neighbors, drs=False):
"""Read the mobility matrix data given by Pedro and save it in the format needed by
robot_dance.
    cities_data: a data frame in the format of cities_data.csv
max_neighbors: maximum number of neighbors allowed in the mobility matrix.
"""
# Read the mobility_matrix
large_cities = cities_data.index
if drs:
mobility_matrix = pd.read_csv("data/drs_mobility.csv", index_col=0).T
mobility_matrix = mobility_matrix.mask(
mobility_matrix.rank(axis=1, method='min', ascending=False) > max_neighbors + 1, 0
)
elif path.exists("data/move_mat_SÃO PAULO_SP-Municipios_norm.csv"):
mobility_matrix = pd.read_csv("data/move_mat_SÃO PAULO_SP-Municipios_norm.csv",
header=None, sep=" ")
cities_names = pd.read_csv("data/move_mat_SÃO PAULO_SP-Municipios_reg_names.txt",
header=None)
# Cut the matrix to see only the desired cities
cities_names = [i.title() for i in cities_names[0]]
mobility_matrix.index = cities_names
mobility_matrix.columns = cities_names
mobility_matrix = mobility_matrix.loc[large_cities, large_cities].T
mobility_matrix = mobility_matrix.mask(
mobility_matrix.rank(axis=1, method='min', ascending=False) > max_neighbors + 1, 0
)
else:
ncities = len(large_cities)
pre_M = np.zeros((ncities, ncities))
mobility_matrix = | pd.DataFrame(data=pre_M, index=large_cities, columns=large_cities) | pandas.DataFrame |
from __future__ import division
import pandas as pd
import os.path
import sys
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
from .earthworm_functions import EarthwormFunctions
class EarthwormInputs(ModelSharedInputs):
"""
Input class for Earthworm.
"""
def __init__(self):
"""Class representing the inputs for Earthworm"""
super(EarthwormInputs, self).__init__()
self.k_ow = pd.Series([], dtype="float")
self.l_f_e = pd.Series([], dtype="float")
self.c_s = pd.Series([], dtype="float")
self.k_d = pd.Series([], dtype="float")
self.p_s = | pd.Series([], dtype="float") | pandas.Series |
from pathsetup import run_path_setup
run_path_setup()
import os
import gl
gl.isTrain = False
from model_config import model_argparse
config = model_argparse()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = config['device']
import tensorflow as tf
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
import numpy as np
import pandas as pd
import utils
from ved import VEDModel
from sklearn.model_selection import train_test_split
np.random.seed(1337)
if config['dataset'] == 'daily':
train_data = pd.read_csv(config['data_dir'] + 'DailyDial/de_duplicated/df_daily_train.csv')
val_data = pd.read_csv(config['data_dir'] + 'DailyDial/de_duplicated/df_daily_valid_without_duplicates.csv')
test_data = | pd.read_csv(config['data_dir'] + 'DailyDial/de_duplicated/df_daily_test_without_duplicates.csv') | pandas.read_csv |
import pandas as pd
# drop unknown artist
import matplotlib as mpl
import matplotlib.pyplot as plt
log_dir ='logs/'
mpl.rcParams['figure.figsize'] = (22, 20)
dataset=pd.read_csv('/content/MultitaskPainting100k_Dataset_groundtruth/groundtruth_multiloss_train_header.csv')
# indexName=pf[pf['artist']=='Unknown photographer'].index
# pf.drop(indexName,inplace=True)
# grouped = pf.groupby(['artist']).size().reset_index(name='counts')
# p=grouped.sort_values('counts', ascending=False).head(50)
# top50=p['artist'].tolist()
# dataset=pd.DataFrame()
# for name,group in pf.groupby(['artist']):
# if name in top50:
# dataset=pd.concat([dataset,group],axis=0)
# dataset=dataset.reset_index()
import numpy as np
from collections import Counter
from sklearn.model_selection import StratifiedShuffleSplit
def generate_classdict(label):
counter = Counter(label)
class_num=len(counter)
class_list=list(counter.keys()) #?
class_dict={}
class_weight={}
total = len(label)
count=0
for name,num in counter.items():
class_dict[name]=count
class_weight[count]=(total/(num*class_num))
count+=1
return class_num,class_list,class_dict,class_weight
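# Worked sketch of generate_classdict on a tiny made-up label list: weights
# follow total / (count * n_classes), so rarer classes get larger weights.
def _classdict_example():
    labels = ["a", "a", "a", "b"]
    n, names, mapping, weights = generate_classdict(labels)
    # n == 2, mapping == {"a": 0, "b": 1}
    # weights == {0: 4 / (3 * 2) ~ 0.67, 1: 4 / (1 * 2) == 2.0}
    return weights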
X=np.array(dataset['filename'].tolist())
y=np.array(dataset['style'].tolist())
Style_class_num,Style_class_list,Style_class_dict,Style_class_weight=generate_classdict(y)
y=np.array(dataset['genre'].tolist())
Objtype_class_num,Objtype_class_list,Objtype_class_dict,Objtype_class_weight=generate_classdict(y)
# y=np.array(dataset['Creation Date'].tolist())
# CreationDate_class_num,CreationDate_class_list,CreationDate_class_dict,CreationDate_class_weight=generate_classdict(y)
y=np.array(dataset['artist'].tolist())
Artist_class_num,Artist_class_list,Artist_class_dict,Artist_class_weight=generate_classdict(y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
print(sss.get_n_splits(X, y))
train_frame=pd.DataFrame()
test_frame= | pd.DataFrame() | pandas.DataFrame |
#This script is to do kinetic classification.
#Make sure that you have set up your PYTHONPATH environment
#variable as described in the github repository.
from zipfile import ZIP_FILECOUNT_LIMIT
from isort import file
from SBMLKinetics import kinetics_classification
import sys
import numpy as np
import os
from sympy import *
from libsbml import * # access functions in SBML
import time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# Column names
SBMLID = "SBMLid"
CLASSIFICATIONS = 'Classifications'
REACTION = 'Reaction'
KINETICLAW = 'kinetic law'
PERCENTAGE = 'Percentage'
class KineticAnalyzer:
"""
Load Dataset of SBML files.
Args:
dataSet: str-"biomodels", "curated", "metabolic", "signalling", "homo_sapiens", "non_homo",
"cellular_organisms", "Mus_musculus", "Mammalia", "Saccharomyces_cerevisiae";
path: str-path to the file, with a format of ``D:\\path\\to``;
model_indices: range-(initial_model_indx, final_model_indx)
"""
def __init__(self, path = os.path.dirname(os.path.abspath(__file__)),
dataSet = "biomodels", model_indices = range(0,1000)):
        # In addition to the dataset name, allow users to import a zip of SBML files from a path
initial_model_indx = min(model_indices)
final_model_indx = max(model_indices)
if type(dataSet) == str and dataSet in ["biomodels", "curated",
"metabolic", "signalling", "homo_sapiens", "non_homo",
"cellular_organisms", "Mus_musculus", "Mammalia", "Saccharomyces_cerevisiae"]:
zip_filename = dataSet + '.zip'
try:
self.tuple = kinetics_classification._dataSetStatistics(zip_filename = zip_filename,
initial_model_indx = initial_model_indx, final_model_indx = final_model_indx)
except Exception as err:
raise Exception (err)
elif '.zip' in dataSet:
try:
self.tuple = kinetics_classification._dataSetStatistics(data_dir = path, zip_filename = dataSet,
initial_model_indx = initial_model_indx, final_model_indx = final_model_indx)
except Exception as err:
raise Exception (err)
else:
raise Exception("Not a valid dataset input.")
def getKineticLawDistribution(self, path = "", fileName = ""):
"""
Get the kinetic law distribution (and save the dataframe into an excel file).
Args:
path: str-path to the file, with a format like ``D:/path/to/`` (or ``D:\\\path\\\ to\\\``)
fileName: str-file name with which the excel file save to, "" (do not save to excel file).
Returns:
df_gen_stat_final: dataFrame-kinetic law distribution.
The column names are: "Classifications", "Percentage", "Percentage standard error",
"Percentage per model", "Percentage per model standard error".
In the column of "Classifications", there are "ZERO", "UNDR", "UNMO", "BIDR", "BIMO",
"MM", "MMCAT", "HILL", "FR" and "NA" in detail.
"ZERO" means "Zeroth order", "UNDR" means "Uni-directional mass action", "UNMO" means
"Uni-term with moderator", "BIDR" means "Bi-directional mass action", "BIMO" means "Bi-
terms with moderator", "MM" means "Michaelis-Menten kinetics", "MMCAT" means "Michaelis-
Menten kinetics", "HILL" means "Hill equations", "FR" means kinetics in the format of
fraction other than MM, MMCAT and HILL, "NA" means not classified kinetics.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_final = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_final.insert(2, "Percentage standard error", 0)
except:
pass
if fileName != "":
# Create a Pandas Excel writer using XlsxWriter as the engine.
path_fileName = path + fileName
writer = pd.ExcelWriter(path_fileName, engine='xlsxwriter')
df_gen_stat_final.to_excel(writer, sheet_name='general_statistics')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
return df_gen_stat_final
def TopFrequentKineticLawType(self):
"""
        Return the most frequent kinetic law type on average in the loaded SBML dataset.
Returns:
            kinetics_type_list: list of kinetics_type.
            kinetics_type: str-kinetic law type.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
df_temp = df_gen_stat_plot
# try:
# kinetics_type_list = []
# max_idx = df_temp['Percentage'].idxmax()
# kinetics_type = df_temp['Classifications'][max_idx]
# kinetics_type_list.append(kinetics_type)
# except:
max_value = df_temp['Percentage'].max()
idx_list = df_temp.index[df_temp['Percentage'] == max_value].tolist()
kinetics_type_list =[]
for i in range(len(idx_list)):
kinetics_type_list.append(df_temp.iloc[idx_list[i]]["Classifications"])
return kinetics_type_list
def plotKineticLawDistribution(self, fileName = 'KineticLawDistribution.pdf'):
"""
        Plot the kinetic law distribution and save it as a pdf file.
Args:
fileName: str-file name with which the pdf file save to.
"""
(_, df_gen_stat, _, _, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
yerr = df_gen_stat_plot[["Percentage standard error", \
"Percentage per model standard error"]].to_numpy().T
ax = df_gen_stat_plot.plot(kind="bar",x="Classifications", y=["Percentage","Percentage per model"],\
yerr=yerr, fontsize = 8)
ax.set_ylim(0.,1.)
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda y, p: str("{:.2%}".format(y))))
for p in ax.patches:
ax.annotate(str("{:.2%}".format(p.get_height())), (p.get_x() * 1.005, p.get_height() * 1.005), fontsize = 4)
#plt.show()
fig = ax.get_figure()
fig.savefig(fileName)
def getKineticLawDistributionPerMassTransfer(self, rct_num, prd_num, path = "", fileName = ""):
"""
        Get the kinetic law distribution for a certain mass transfer
(and save the dataframe into an excel file).
Args:
rct_num: int-0, 1, 2, 3 (representing > 2).
prd_num: int-0, 1, 2, 3 (representing > 2).
path: str-path to the file, with a format like ``D:/path/to/`` (or ``D:\\\path\\\ to\\\``)
fileName: str-file name with which the excel file save to, "" (do not save to excel file).
Returns:
            df_gen_stat_PR_final: dataFrame-the kinetic law distribution for a certain mass transfer.
The column names are: "Classifications", "Percentage", "Percentage standard error",
"Percentage per model", "Percentage per model standard error".
In the column of "Classifications", there are "ZERO", "UNDR", "UNMO", "BIDR", "BIMO",
"MM", "MMCAT", "HILL", "FR" and "NA" in detail.
"ZERO" means "Zeroth order", "UNDR" means "Uni-directional mass action", "UNMO" means
"Uni-term with moderator", "BIDR" means "Bi-directional mass action", "BIMO" means "Bi-
terms with moderator", "MM" means "Michaelis-Menten kinetics", "MMCAT" means "Michaelis-
Menten kinetics", "HILL" means "Hill equations", "FR" means kinetics in the format of
fraction other than MM, MMCAT and HILL, "NA" means not classified kinetics.
"""
(_, df_gen_stat, _, df_gen_stat_PR, _, _, _) = self.tuple
df_gen_stat_plot = df_gen_stat[["Classifications", "Percentage", "Percentage per model", \
"Percentage per model standard error"]]
try:
df_gen_stat_plot.insert(2, "Percentage standard error", 0)
except:
pass
try:
df_gen_stat_PR.insert(2, "Percentage standard error", 0)
except:
pass
df_gen_stat_PR_plot = {}
types = len(df_gen_stat_plot)
if prd_num in [0,1,2,3] and rct_num in [0,1,2,3]:
i = prd_num*4 + rct_num
df_gen_stat_PR_plot[i] = pd.DataFrame(columns = df_gen_stat_PR.columns.tolist())
df_temp = df_gen_stat_PR[types*i:types*(i+1)]
df_gen_stat_PR_plot[i] = | pd.concat([df_gen_stat_PR_plot[i],df_temp], ignore_index=True) | pandas.concat |
"""The American Gut App."""
import dash
import dash_daq as daq
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
from start import (
samples,
find_closest,
healthiest_sample,
meta,
describe,
bact_plot,
firm_plot,
)
close = | pd.Series(0, index=samples.index) | pandas.Series |
import datetime
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from numpy.testing import assert_allclose
from pvlib.location import Location
from pvlib import tracking
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
apparent_zenith = pd.Series([10])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = | pd.Series([90]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
'''This script finds the best parameters for the SVC and LGR models, fits the data to these two models, and outputs the classification images and the classification reports as CSV documents.
Usage: src/model.py --data_input=<data_input> --result_output=<result_output>
Arguments:
--data_input=<data_input> The path for all the clean data
--result_output=<result_output> The path where to store the csv data
'''
import numpy as np
import pandas as pd
from docopt import docopt
from sklearn.model_selection import RandomizedSearchCV
#from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
#from plot_classifier import plot_classifier
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
import lightgbm as lgb
opt = docopt(__doc__)
def get_model_results(X, y, X_train, y_train, X_test, y_test, result_output):
parameters_svc = {'C':np.logspace(-3,3,7), 'gamma':np.logspace(-4,2,7)}
pd.DataFrame(parameters_svc).to_csv(result_output + '/hyper_parameters.csv')
svc = SVC()
svc_opt = RandomizedSearchCV(svc, parameters_svc, cv=5, iid=False, n_iter = 25)
# svc_opt.fit(X_train, y_train)
# train_score_svc = svc_opt.score(X_train,y_train)
# test_score_svc= svc_opt.score(X_test,y_test)
#svc_opt = GridSearchCV(svc, parameters_svc, cv=5, iid=False)
svc_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())
train_score_svc = svc_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())
test_score_svc = svc_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())
parameters_lgr = {'C':np.logspace(-3,3,7)}
lgr = LogisticRegression()
#lgr_opt = GridSearchCV(lgr, parameters_lgr, cv=5, iid=False)
lgr_opt = RandomizedSearchCV(lgr, parameters_lgr, cv=5, iid=False, n_iter = 25)
lgr_opt.fit(X_train.to_numpy(), y_train.to_numpy().ravel())
train_score_lgr = lgr_opt.score(X_train.to_numpy(),y_train.to_numpy().ravel())
test_score_lgr = lgr_opt.score(X_test.to_numpy(),y_test.to_numpy().ravel())
lgbm = lgb.LGBMClassifier()
lgbm.fit(X_train.to_numpy(),y_train.to_numpy().ravel())
train_score_lgbm = lgbm.score(X_train.to_numpy(),y_train.to_numpy().ravel())
test_score_lgbm = lgbm.score(X_test.to_numpy(),y_test.to_numpy().ravel())
data = {'Train accuracy':[train_score_svc, train_score_lgr, train_score_lgbm], 'Validation accuracy':[test_score_svc, test_score_lgr,test_score_lgbm], 'Best parameters':[svc_opt.best_params_,lgr_opt.best_params_, 'NA']}
accuracy_df = pd.DataFrame(data, index = ['SVC','LGR','LGBM'])
accuracy_df.to_csv(result_output+'/accuracy.csv')
predictions_svc = svc_opt.predict(X_test)
predictions_lgr = lgr_opt.predict(X_test)
predictions_lgbm = lgbm.predict(X_test)
svc_report = pd.DataFrame(classification_report(y_test, predictions_svc, output_dict=True))
lgr_report = pd.DataFrame(classification_report(y_test, predictions_lgr, output_dict=True))
lgbm_report = pd.DataFrame(classification_report(y_test, predictions_lgbm, output_dict=True))
svc_report.to_csv(result_output+'/svc_classification_report.csv')
lgr_report.to_csv(result_output+'/lgr_classification_report.csv')
lgbm_report.to_csv(result_output+'/lgbm_classification_report.csv')
try:
pd.read_csv(result_output+'/svc_classification_report.csv')
pd.read_csv(result_output+'/lgr_classification_report.csv')
pd.read_csv(result_output+'/lgbm_classification_report.csv')
except:
raise Exception("result doesn't save successfully")
return svc_opt, lgr_opt, lgbm
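# Minimal sketch of the tuning pattern used above on synthetic data
# (illustrative only; the dataset and search settings are made up and the
# function is not called anywhere in the pipeline).
def _search_example():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=100, random_state=0)
    search = RandomizedSearchCV(SVC(), {'C': np.logspace(-3, 3, 7)},
                                cv=3, n_iter=5)
    search.fit(X_demo, y_demo)
    return search.best_params_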
def main(data_input, result_output):
X_train = pd.read_csv(data_input+'/X_train_clean.csv')
y_train = | pd.read_csv(data_input+'/y_train.csv',usecols = ["Target"]) | pandas.read_csv |
import numpy as np # Matrix package
import pandas as pd # Database package
import support # For error handling
import matplotlib.pyplot as plt # Plotting package
import matplotlib.patches as mpatches # Legend in plot
import sys # For aborting scripts
import math # For floor
import user_setup
from PIL import Image # For saving as TIFF
from io import BytesIO # For saving as TIFF
#-----------------------------------------------------------------------------------
######## Function for making list of color codes
## Takes optional color scheme as input
## Returns vector of colors to be used in plot
def adjust_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> adjust_color('g', 0.3)
>> adjust_color('#F034A3', 0.6)
>> adjust_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def get_colors(df, cycles=None, color=None, color_scheme=None):
if not isinstance(df, pd.DataFrame): # If df is not dataframe, assumes df is list
new_df = | pd.DataFrame() | pandas.DataFrame |
# scraper_horse_racing.py
# -*- coding: utf-8 -*-
import os
import time
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Variable with the URL of the website.
my_url = "https://www.horseracing24.com/"
# Preparing of the Tor browser for the work.
# for my laptop
# torexe = os.popen(\
# r"C:\Users\Oliver\Desktop\Tor Browser\Browser\firefox.exe")
# for my mainframe
torexe = os.popen(\
r"C:\Users\olive\OneDrive\Pulpit\Tor Browser\Browser\firefox.exe")
# for my laptop
# profile = FirefoxProfile(\
# r"C:\Users\Oliver\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser"+\
# "\profile.default")
# for my mainframe
profile = FirefoxProfile(\
r"C:\Users\olive\OneDrive\Pulpit\Tor Browser\Browser\TorBrowser\Data"+\
"\Browser\profile.default")
profile.set_preference("network.proxy.type", 1)
profile.set_preference("network.proxy.socks", "127.0.0.1")
profile.set_preference("network.proxy.socks_port", 9150)
profile.set_preference("network.proxy.socks_remote_dns", False)
profile.update_preferences()
firefox_options = Options()
driver = Firefox(firefox_profile=profile, options=firefox_options)
# Adding the headers to the browser
session = requests.Session()
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0)"+\
" Gecko/20100101 Firefox/97.0", "Accept": "text/html,application"+\
"/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8"}
req = session.get(my_url, headers=headers)
# Loads the website code as the Selenium object.
driver.get(my_url)
# Prepare the blank dictionary to fill in for pandas.
dictionary_of_races = {}
# Preparation of lists with scraped data.
list_of_hippodromes = []
list_of_start_times = []
list_of_names_data = []
list_of_ratings = []
list_of_countries = []
list_of_horses = []
list_of_jockeys_trainers = []
list_of_age = []
list_of_weights = []
list_of_traveled_distances = []
list_of_bet_comments = []
list_of_each_way_bets = []
list_of_winner_comments = []
list_of_winners = []
# Wait for page to fully render
try:
element = WebDriverWait(driver, 120).until(
EC.presence_of_element_located((By.CLASS_NAME, \
"boxOverContent__bannerLink")))
finally:
# Loads the website code as the BeautifulSoup object.
pageSource = driver.page_source
bsObj = BeautifulSoup(pageSource, "lxml")
# Determining the number of the hippodromes.
hippodromes = bsObj.find_all("div", {"class":\
"subTabs subTabs--label"})
# Building the list with the selected times.
selected_times = bsObj.find_all("div", {"class":\
"subTabs__tab selected"})
print(len(hippodromes))
# Scraping of the hippodromes.
for ind in range(1, (len(hippodromes)+1)):
hippodrom = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[1]').text
list_of_hippodromes.append(hippodrom)
list_of_hippodromes.append("")
list_of_hippodromes.append("")
# Inserting of the empty fields as many as participants in
# the race.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(1, (count_iterations-1)):
list_of_hippodromes.append("")
# Scraping of the selected start times.
for ind in range(1, (len(hippodromes)+1)):
list_of_start_times.append("")
list_of_start_times.append(selected_times[(ind-1)].get_text())
list_of_start_times.append("")
# Inserting of the empty fields as many as participants in
# the race.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(1, (count_iterations-1)):
list_of_start_times.append("")
print(selected_times[(ind-1)].get_text())
print(count_iterations)
# Scraping of the racing names and the additional race data.
for ind in range(1, (len(hippodromes)+1)):
list_of_names_data.append("")
# Scraping of the racing names.
racing_name = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[1]/div[2]/div/span').text
list_of_names_data.append(racing_name)
# Scraping of the additional race data.
try:
racing_data_1 = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[2]/span[1]').text
string_of_racing_data = (racing_data_1 + " ")
except:
racing_data_1 = ""
string_of_racing_data = (racing_data_1 + " ")
try:
racing_length = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[2]/span[2]')
racing_data_2 = racing_length.get_attribute("title")
string_of_racing_data = string_of_racing_data + (racing_data_2 +\
" ")
except:
racing_data_2 = ""
string_of_racing_data = string_of_racing_data + (racing_data_2 +\
" ")
try:
racing_data_3 = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[2]/span[3]').text
string_of_racing_data = string_of_racing_data + (racing_data_3 +\
" ")
except:
racing_data_3 = ""
string_of_racing_data = string_of_racing_data + (racing_data_3 +\
" ")
try:
racing_data_4 = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[2]/span[4]').text
string_of_racing_data = string_of_racing_data + (racing_data_4 +\
" ")
except:
racing_data_4 = ""
string_of_racing_data = string_of_racing_data + (racing_data_4 +\
" ")
try:
racing_data_5 = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[2]/span[5]').text
string_of_racing_data = string_of_racing_data + (racing_data_5 +\
" ")
except:
racing_data_5 = ""
string_of_racing_data = string_of_racing_data + (racing_data_5 +\
" ")
# Scraping of the status data (e.g. Finished).
try:
racing_data_6 = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[1]/div[3]').text
string_of_racing_data = string_of_racing_data + racing_data_6
string_of_racing_data.strip()
list_of_names_data.append(string_of_racing_data)
except:
racing_data_6 = ""
string_of_racing_data = string_of_racing_data + racing_data_6
string_of_racing_data.strip()
list_of_names_data.append(string_of_racing_data)
# Inserting of the empty fields as many as participants in
# the race.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(1, (count_iterations-1)):
list_of_names_data.append("")
# Scraping of the ratings.
for ind in range(1, (len(hippodromes)+1)):
list_of_ratings.append("")
list_of_ratings.append("")
list_of_ratings.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
rating = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[2]').text
list_of_ratings.append(rating)
except:
list_of_ratings.append("")
# Scraping of the countries.
for ind in range(1, (len(hippodromes)+1)):
list_of_countries.append("")
list_of_countries.append("")
list_of_countries.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
country_string = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[3]/span')
country=country_string.get_attribute("title")
list_of_countries.append(country)
except:
list_of_countries.append("")
# Scraping of the horse names.
for ind in range(1, (len(hippodromes)+1)):
list_of_horses.append("")
list_of_horses.append("")
list_of_horses.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
horse = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[3]').text
list_of_horses.append(horse)
except:
list_of_horses.append("")
# Scraping of the names of the jockeys and the trainers.
for ind in range(1, (len(hippodromes)+1)):
list_of_jockeys_trainers.append("")
list_of_jockeys_trainers.append("")
list_of_jockeys_trainers.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
jockey_trainer = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[4]').text
list_of_jockeys_trainers.append(jockey_trainer)
except:
list_of_jockeys_trainers.append("")
# Scraping of the age.
for ind in range(1, (len(hippodromes)+1)):
list_of_age.append("")
list_of_age.append("")
list_of_age.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
age = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[5]').text
list_of_age.append(age)
except:
list_of_age.append("")
# Scraping of the weights.
for ind in range(1, (len(hippodromes)+1)):
list_of_weights.append("")
list_of_weights.append("")
list_of_weights.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
weight = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[6]').text
list_of_weights.append(weight)
except:
list_of_weights.append("")
# Scraping of the traveled_distances.
for ind in range(1, (len(hippodromes)+1)):
list_of_traveled_distances.append("")
list_of_traveled_distances.append("")
list_of_traveled_distances.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
traveled_distance = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[7]').text
list_of_traveled_distances.append(traveled_distance)
except:
list_of_traveled_distances.append("")
# Scraping of the comments about the each way bets.
for ind in range(1, (len(hippodromes)+1)):
list_of_bet_comments.append("")
list_of_bet_comments.append("")
list_of_bet_comments.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
# Scraping of the comments about the each way bets.
try:
comment = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[8]/span')
bet_comment_1 = comment.get_attribute("class")
string_of_comments = (bet_comment_1 + " ")
except:
bet_comment_1 = ""
string_of_comments = (bet_comment_1 + " ")
try:
comment = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[8]/span')
bet_comment_2 = comment.get_attribute("alt")
text_in_one_row = bet_comment_2.split()
bet_comment_2 = " ".join(text_in_one_row)
bet_comment_2 = bet_comment_2.replace("[d]", " » ")
bet_comment_2 = bet_comment_2.replace("[u]", " » ")
string_of_comments = string_of_comments + bet_comment_2
string_of_comments.strip()
list_of_bet_comments.append(string_of_comments)
except:
bet_comment_2 = ""
string_of_comments = string_of_comments + bet_comment_2
string_of_comments.strip()
list_of_bet_comments.append(string_of_comments)
# Scraping of the each way bets.
for ind in range(1, (len(hippodromes)+1)):
list_of_each_way_bets.append("")
list_of_each_way_bets.append("")
list_of_each_way_bets.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
each_way_bet = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[8]/span').text
list_of_each_way_bets.append(each_way_bet)
except:
list_of_each_way_bets.append("")
# Scraping of the comments about the winners.
for ind in range(1, (len(hippodromes)+1)):
list_of_winner_comments.append("")
list_of_winner_comments.append("")
list_of_winner_comments.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
# Scraping of the comments about the winners.
try:
comment = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[9]/span')
winner_comment_1 = comment.get_attribute("class")
string_of_winner_comments = (winner_comment_1 + " ")
except:
winner_comment_1 = ""
string_of_winner_comments = (winner_comment_1 + " ")
try:
comment = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[9]/span')
winner_comment_2 = comment.get_attribute("alt")
winner_text_in_one_row = winner_comment_2.split()
winner_comment_2 = " ".join(winner_text_in_one_row)
winner_comment_2 = winner_comment_2.replace("[d]", " » ")
winner_comment_2 = winner_comment_2.replace("[u]", " » ")
string_of_winner_comments = string_of_winner_comments +\
winner_comment_2
string_of_winner_comments.strip()
list_of_winner_comments.append(string_of_winner_comments)
except:
winner_comment_2 = ""
string_of_winner_comments = string_of_winner_comments +\
winner_comment_2
string_of_winner_comments.strip()
list_of_winner_comments.append(string_of_winner_comments)
# Scraping of the winners.
for ind in range(1, (len(hippodromes)+1)):
list_of_winners.append("")
list_of_winners.append("")
list_of_winners.append("")
# Enumeration of the race participants for the iteration.
count_iterations = len(driver.find_elements(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div[*]/div[3]'))
for i in range(4, (count_iterations+2)):
try:
winner = driver.find_element(By.XPATH ,\
'//div[@class="container__livetable"]/div[2]/div/section/div['\
+str(ind)+']/div[3]/div/div/div['+str(i)+']/div[9]/span').text
list_of_winners.append(winner)
print(winner)
except:
list_of_winners.append("")
print("")
# countries (<span class="flag fl_77" title="France"></span>)
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4] (<div class="container__livetable">)
# //*[@id="fsbody"]
# //*[@id="live-table"]
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section (<section class="event">)
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[1]/div[3]
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[1]/div[3]/div/div (<div class="sportName horse-racing">)
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[1]/div[3]/div/div/div[4]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[1]/div[3]/div/div/div[5]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[2]/div[3]/div/div/div[4]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[2]/div[3]/div/div/div[5]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[2]/div[3]/div/div/div[6]/div[3]/span
# ...
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[3]/div[3]/div/div/div[4]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[3]/div[3]/div/div/div[5]/div[3]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[3]/div[3]/div/div/div[6]/div[3]/span
# winners (<span class="" alt="8.00">8.00</span>)
# (<span class="down" alt="2.37[d]2.20">2.20</span>)
# (<span class="" alt="3.00">3.00</span>)
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[9]/div[3]/div/div/div[4]/div[9]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[9]/div[3]/div/div/div[5]/div[9]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[9]/div[3]/div/div/div[6]/div[9]/span
# /html/body/div[5]/div[1]/div/div[1]/div[2]/div[4]/div[2]/div/section/div[9]/div[3]/div/div/div[7]/div[9]/span
# ...
# Add lists with the scraped data to the dictionary in the correct
# order.
dictionary_of_races["Hippodrome"] = list_of_hippodromes
dictionary_of_races["Start time"] = list_of_start_times
dictionary_of_races["Racing name and data"] = list_of_names_data
dictionary_of_races["Rating"] = list_of_ratings
dictionary_of_races["Country"] = list_of_countries
dictionary_of_races["Horse"] = list_of_horses
dictionary_of_races["Jockey/Trainer"] = list_of_jockeys_trainers
dictionary_of_races["Age"] = list_of_age
dictionary_of_races["Weight"] = list_of_weights
dictionary_of_races["Traveled distance"] = list_of_traveled_distances
dictionary_of_races["Bet comment"] = list_of_bet_comments
dictionary_of_races["Each way bet"] = list_of_each_way_bets
dictionary_of_races["Winner comment"] = list_of_winner_comments
dictionary_of_races["Winner"] = list_of_winners
# Creating of the frame for the data.
df_res = | pd.DataFrame(dictionary_of_races) | pandas.DataFrame |
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
import lightgbm as lgb
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
##############################################################################
dir = Path(__file__).parents[2]
input_path = dir / "input/"
estimation_path = dir / "src/estimation"
model_path = dir / "src/estimation/models"
from estimation.standard import data_birth, get_n_partners
from estimation.extended import data_general
##############################################################################
mortality = pd.read_csv(input_path / "mortality.csv")
# fertility = pd.read_csv(input_path / "fertility.csv")
##############################################################################
# Read in and transfrom fertility data
def scale_fertility():
df_fertility = pd.read_csv(input_path / "fertility.csv")
df_fertility.rename(columns={"Age": "age",
"1968": "prob"},
inplace=True)
df_fertility["prob"] = (df_fertility["prob"]/1000)
return df_fertility
def death(dataf):
dataf = dataf.copy()
dataf["deaths"] = 0
dataf.loc[dataf["age"]>=dataf["age_max"], "deaths"] = 1
if np.sum(dataf["deaths"])>0:
death_count = np.sum(dataf["deaths"])
dataf = dataf.loc[dataf["deaths"]==0,:]
else:
death_count = 0
dataf.drop("deaths", axis=1, inplace=True)
return dataf, death_count
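# Illustrative sketch of the rule above on a made-up two-row frame: the agent
# whose age has reached age_max is removed and counted.
def _death_example():
    demo = pd.DataFrame({"age": [30, 80], "age_max": [75, 78]})
    survivors, n_dead = death(demo)
    return n_dead  # 1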
def dating_market(dataf):
"""
    New couples forming. The share of singles who find a new partner is taken from the sampled in_couple rate.
"""
dataf = dataf.copy()
dataf.reset_index(drop=True, inplace=True)
eligible = (dataf["in_couple"] == 0) & (dataf["child"] == 0) & (dataf["n_people"] - dataf["n_children"] == 1)
female_singles = dataf.loc[eligible & (dataf["female"] == 1), :]
male_singles = dataf.loc[eligible & (dataf["female"] == 0), :]
not_single = dataf[~eligible]
# Get rate for marriage from sample
y = dataf["year"].max()
rates = get_rates(y)
n = rates["in_couple_in"][0]
    n_couples = n*sum(eligible)//2 # divide by two because each couple counts two people
new_couples = round(min(len(female_singles),
len(male_singles),
n_couples))
if new_couples>0:
matching_dict = _matching(female_singles,
male_singles,
new_couples)
dataf_out = pd.concat((not_single,
matching_dict["girls"],
matching_dict["guys"]), axis=0)
assert(
len(matching_dict["girls"]) == len(female_singles)
        ), "Length of dataframe is not the same as before"
else:
dataf_out = dataf
return dataf_out, new_couples
def _matching(females, males, number):
"""
Finding the 5 best fitting matches and then choosing randomly.
#TODO: think of a better way than this loop
"""
partners = females.copy()
lucky_guys = males.sample(number)
# partners.reset_index(drop=True, inplace=True)
# lucky_guys.reset_index(drop=True, inplace=True)
neigh = NearestNeighbors(n_neighbors=5)
happy_girls = pd.DataFrame()
# Looping since as soon as one couple matched, that woman is no longer available
for i in np.arange(len(lucky_guys)):
neigh.fit(partners[["age",
"education",
"migback",
"east",
"n_children"]])
bachelor = lucky_guys.iloc[i,:]
bachelor = bachelor[["age",
"education",
"migback",
"east",
"n_children"]].to_numpy().reshape(1,-1)
partner_choice = neigh.kneighbors(bachelor)
partner = np.random.choice(np.ravel(partner_choice[1]), 1)
happy_girls = pd.concat([happy_girls, partners.iloc[partner,:]])
partners.drop(partners.iloc[partner].index, inplace=True)
happy_girls, lucky_guys = _adjust_values(happy_girls, lucky_guys)
singles_dict = {"all_female": females,
"all_male": males,
"happy_girls": happy_girls,
"lucky_guys": lucky_guys}
out_dict = _concat_singles(singles_dict)
return out_dict
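# Stand-alone sketch of the nearest-neighbour idea used in _matching (the
# candidate features and the query row are made up): find the closest rows and
# then pick one of them at random.
def _knn_example():
    candidates = np.array([[25, 2], [40, 1], [31, 3]])
    neigh = NearestNeighbors(n_neighbors=2).fit(candidates)
    _, idx = neigh.kneighbors(np.array([[30, 2]]))
    return np.random.choice(np.ravel(idx), 1)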
def _concat_singles(dici):
"""
    Concatenating those who found new partners and those who didn't
"""
unlucky_guys = dici["all_male"].loc[dici["all_male"].index.isin(dici["lucky_guys"].index) == False, :]
unhappy_girls = dici["all_female"].loc[dici["all_female"].index.isin(dici["happy_girls"].index) == False, :]
girls = pd.concat((dici["happy_girls"], unhappy_girls), axis = 0)
guys = pd.concat((dici["lucky_guys"], unlucky_guys), axis = 0)
assert(
len(unlucky_guys) + len(dici["lucky_guys"]) == len(dici["all_male"])
    ), "Error in concatenating guys"
assert(
len(unhappy_girls) + len(dici["happy_girls"]) == len(dici["all_female"])
    ), "Error in concatenating girls"
out_dict = {"girls" : girls,
"guys": guys}
return out_dict
def _adjust_values(females, males):
"""
Adjusting the values as the man moves in with the woman
"""
females = females.copy()
males = males.copy()
males.loc[:,"hid"] = females["hid"].tolist()
males.loc[:,"east"] = females["east"].tolist()
males.loc[:,"hhweight"] = females["hhweight"].tolist()
males.loc[:,"in_couple"] = 1
females.loc[:,"in_couple"] = 1
females.loc[:, "hhv"] = 0 # Make women the head of household
males.loc[:, "hhv"] = 1 # Men are "only" partner
return females, males
def separations(dataf):
"""
Calculates the seperations in each period.
Only those who are married or in a relationship (in_couple) can separate
"""
dataf = dataf.copy()
# Get rate for marriage from sample
y = dataf["year"].max()
rates = get_rates(y)
probability = np.random.uniform(0, 1, len(dataf))
condition_married = (dataf["married"] == 1) & (probability<rates["married_out"])
condition_incouple = (dataf["in_couple"] == 1) & (dataf["married"] == 0) & (probability<rates["in_couple_out"])
condition_separation = (condition_married | condition_incouple)
males = (condition_separation) & (dataf["female"] == 0)
dataf.loc[condition_separation, ["married", "in_couple"]] = [[0, 0]]
# Men move out; resetting HID
dataf.loc[males, "orighid"] = dataf.loc[males, "hid"].copy()
dataf.loc[males, "hid"] += np.arange(1, np.sum(males)+1)
dataf.loc[condition_separation, "hhv"] = 0 # Both become head of households in their respective HH
separations_this_period = np.sum(condition_separation)
return dataf, separations_this_period
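# Sketch of the draw-versus-rate rule used in separations (the 10% rate is a
# made-up number): with uniform draws, roughly that share falls below the
# threshold and would separate.
def _separation_draw_example():
    draws = np.random.uniform(0, 1, 1000)
    return np.mean(draws < 0.10)  # approximately 0.10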
def marriage(dataf):
dataf = dataf.copy()
dataf = get_n_partners(dataf)
eligible_hh = dataf["n_partners"]==2
eligible_people = dataf["hhv"]!=2
not_child = dataf["child"]==0
not_married = dataf["married"]==0
# Get rate for marriage from sample
y = dataf["year"].max()
rates = get_rates(y)
n = rates["married_in"][0]
    n_weddings = n*sum(not_married & not_child & eligible_hh)//2 # divide by two because each couple counts two people
dataf["hid"].nunique()
draw_df = pd.DataFrame()
draw_df["hid"] = dataf["hid"].unique()
draw_df["draw"] = np.random.uniform(0, 1, len(draw_df))
dataf = | pd.merge(dataf, draw_df, on=["hid"], how="left") | pandas.merge |
from datetime import datetime, timedelta
from typing import Any
import weakref
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.parsing import parse_time_string
from pandas._libs.tslibs.period import Period
from pandas._typing import DtypeObj, Label
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_datetime64_any_dtype,
is_dtype_equal,
is_float,
is_integer,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.arrays.period import (
PeriodArray,
period_array,
raise_on_incompatible,
validate_dtype_freq,
)
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
InvalidIndexError,
_index_shared_docs,
ensure_index,
maybe_extract_name,
)
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.indexes.datetimes import DatetimeIndex, Index
from pandas.core.indexes.extension import inherit_names
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.core.tools.datetimes import DateParseError
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass="PeriodIndex or list of Periods"))
# --- Period index sketch
def _new_PeriodIndex(cls, **d):
# GH13277 for unpickling
values = d.pop("data")
if values.dtype == "int64":
freq = d.pop("freq", None)
values = PeriodArray(values, freq=freq)
return cls._simple_new(values, **d)
else:
return cls(values, **d)
@inherit_names(
["strftime", "to_timestamp", "asfreq", "start_time", "end_time"]
+ PeriodArray._field_ops,
PeriodArray,
wrap=True,
)
@inherit_names(["is_leap_year", "freq", "_format_native_types"], PeriodArray)
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in time.
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1d int np.ndarray or PeriodArray), optional
Optional period-like data to construct index with.
copy : bool
Make a copy of input ndarray.
freq : str or period object, optional
One of pandas period strings or corresponding objects.
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods.
dtype : str or PeriodDtype, default None
Attributes
----------
day
dayofweek
dayofyear
days_in_month
daysinmonth
end_time
freq
freqstr
hour
is_leap_year
minute
month
quarter
qyear
second
start_time
week
weekday
weekofyear
year
Methods
-------
asfreq
strftime
to_timestamp
See Also
--------
Index : The base pandas Index type.
Period : Represents a period of time.
DatetimeIndex : Index with datetime64 data.
TimedeltaIndex : Index of timedelta64 data.
period_range : Create a fixed-frequency PeriodIndex.
Examples
--------
    >>> year_arr = [2000, 2001, 2002]
    >>> q_arr = [1, 2, 3]
    >>> idx = pd.PeriodIndex(year=year_arr, quarter=q_arr)
"""
_typ = "periodindex"
_attributes = ["name", "freq"]
# define my properties & methods for delegation
_is_numeric_dtype = False
_infer_as_myclass = True
_data: PeriodArray
freq: DateOffset
_engine_type = libindex.PeriodEngine
_supports_partial_string_indexing = True
# ------------------------------------------------------------------------
# Index Constructors
def __new__(
cls,
data=None,
ordinal=None,
freq=None,
tz=None,
dtype=None,
copy=False,
name=None,
**fields,
):
valid_field_set = {
"year",
"month",
"day",
"quarter",
"hour",
"minute",
"second",
}
if not set(fields).issubset(valid_field_set):
argument = list(set(fields) - valid_field_set)[0]
raise TypeError(f"__new__() got an unexpected keyword argument {argument}")
name = maybe_extract_name(name, data, cls)
if data is None and ordinal is None:
# range-based.
data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)
            # PeriodArray._generate_range does validation that fields is
            # empty when really using the range-based constructor.
freq = freq2
data = PeriodArray(data, freq=freq)
else:
freq = validate_dtype_freq(dtype, freq)
# PeriodIndex allow PeriodIndex(period_index, freq=different)
# Let's not encourage that kind of behavior in PeriodArray.
if freq and isinstance(data, cls) and data.freq != freq:
# TODO: We can do some of these with no-copy / coercion?
# e.g. D -> 2D seems to be OK
data = data.asfreq(freq)
if data is None and ordinal is not None:
# we strangely ignore `ordinal` if data is passed.
ordinal = np.asarray(ordinal, dtype=np.int64)
data = PeriodArray(ordinal, freq)
else:
# don't pass copy here, since we copy later.
data = period_array(data=data, freq=freq)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: PeriodArray, name: Label = None):
"""
Create a new PeriodIndex.
Parameters
----------
values : PeriodArray
Values that can be converted to a PeriodArray without inference
or coercion.
"""
assert isinstance(values, PeriodArray), type(values)
result = object.__new__(cls)
result._data = values
# For groupby perf. See note in indexes/base about _index_data
result._index_data = values._data
result.name = name
result._cache = {}
result._reset_identity()
return result
# ------------------------------------------------------------------------
# Data
@property
def values(self):
return np.asarray(self)
@property
def _has_complex_internals(self):
# used to avoid libreduction code paths, which raise or require conversion
return True
def _shallow_copy(self, values=None, name: Label = no_default):
name = name if name is not no_default else self.name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
result = self._simple_new(values, name=name)
result._cache = cache
return result
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = libfrequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not isinstance(dtype, PeriodDtype):
return False
return dtype.freq == self.freq
# ------------------------------------------------------------------------
# Rendering Methods
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.astype(object)._values
@property
def _formatter_func(self):
return self.array._formatter(boxed=False)
# ------------------------------------------------------------------------
# Indexing
@cache_readonly
def _engine(self):
# To avoid a reference cycle, pass a weakref of self._values to _engine_type.
period = weakref.ref(self._values)
return self._engine_type(period, len(self))
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
hash(key)
try:
self.get_loc(key)
return True
except KeyError:
return False
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self.asi8, name=self.name)
# ------------------------------------------------------------------------
# Index Methods
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
        PeriodIndex stores internal data as int dtype.
        Replace this with __numpy_ufunc__ in a future version.
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if func is np.add:
pass
elif func is np.subtract:
name = self.name
left = context[1][0]
right = context[1][1]
if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if "M->M" not in func.types:
msg = f"ufunc '{func.__name__}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg)
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return type(self)(result, freq=self.freq, name=self.name)
def asof_locs(self, where, mask: np.ndarray) -> np.ndarray:
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx._values, freq=self.freq)
elif not isinstance(where_idx, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
elif where_idx.freq != self.freq:
raise raise_on_incompatible(self, where_idx)
locs = self.asi8[mask].searchsorted(where_idx.asi8, side="right")
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1
return result
@doc(Index.astype)
def astype(self, dtype, copy=True, how="start"):
dtype = pandas_dtype(dtype)
if is_datetime64_any_dtype(dtype):
# 'how' is index-specific, isn't part of the EA interface.
tz = getattr(dtype, "tz", None)
return self.to_timestamp(how=how).tz_localize(tz)
# TODO: should probably raise on `how` here, so we don't ignore it.
return super().astype(dtype, copy=copy)
@property
def is_full(self) -> bool:
"""
Returns True if this PeriodIndex is range-like in that all Periods
between start and end are present, in order.
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError("Index is not monotonic")
values = self.asi8
return ((values[1:] - values[:-1]) < 2).all()
@property
def inferred_type(self) -> str:
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return "period"
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
target = ensure_index(target)
if isinstance(target, PeriodIndex):
if target.freq != self.freq:
# No matches
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches
target = target.asi8
self_index = self._int64index
else:
self_index = self
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
if self_index is not self:
# convert tolerance to i8
tolerance = self._maybe_convert_timedelta(tolerance)
return Index.get_indexer(self_index, target, method, limit, tolerance)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
if not self._is_comparable_dtype(target.dtype):
no_matches = -1 * np.ones(self.shape, dtype=np.intp)
return no_matches, no_matches
target = target.asi8
indexer, missing = self._int64index.get_indexer_non_unique(target)
return ensure_platform_int(indexer), missing
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label.
Parameters
----------
key : Period, NaT, str, or datetime
String or datetime key must be parseable as Period.
Returns
-------
loc : int or ndarray[int64]
Raises
------
KeyError
Key is not present in the index.
TypeError
If key is listlike or otherwise not hashable.
"""
orig_key = key
if not | is_scalar(key) | pandas.core.dtypes.common.is_scalar |
import pandas as pd
import lenskit.crossfold as xf
import numpy as np
from utils import *
import json
ratings = pd.read_csv('data/Clothing_Shoes_and_Jewelry/Home_and_Kitchen.csv', header=None, index_col=None)
#
dir_exists('data/Clothing_Shoes_and_Jewelry/th_0')
dir_exists('data/Clothing_Shoes_and_Jewelry/th_4')
dir_exists('data/Clothing_Shoes_and_Jewelry/th_5')
columns = ['item', 'user', 'rating', 'timestamp']
ratings.columns = columns
ratings = ratings[['user', 'item', 'rating', 'timestamp']]
ratings = ratings.drop('timestamp', axis=1)
n_user = len( | pd.unique(ratings.user) | pandas.unique |
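# Sketch of the counting idiom used on the line above, on a self-contained
# frame: len(pd.unique(col)) and col.nunique() give the same count of distinct
# values. (The real script uses its own `ratings` frame loaded from CSV.)
import pandas as pd

ratings_demo = pd.DataFrame({
    "user": ["u1", "u1", "u2", "u3"],
    "item": ["i1", "i2", "i1", "i3"],
    "rating": [5.0, 3.0, 4.0, 2.0],
})
n_user_demo = len(pd.unique(ratings_demo.user))   # 3
n_item_demo = ratings_demo.item.nunique()         # 3, same idea via Series.nunique
print(n_user_demo, n_item_demo)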
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: | pd.Timestamp("2012-06-27 00:00:00") | pandas.Timestamp |
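# The fixtures above spell out every timestamp and value by hand; this is a
# compact sketch of building a frame with the same columns (time / fcst /
# fcst_lower / fcst_upper) from pd.date_range. Values are placeholders only.
import numpy as np
import pandas as pd

n = 30
fcst_demo = np.linspace(472.94, 550.00, n)
demo_fcst_frame = pd.DataFrame({
    "time": pd.date_range("1961-01-01", periods=n, freq="MS"),  # month starts
    "fcst": fcst_demo,
    "fcst_lower": fcst_demo - 92.0,
    "fcst_upper": fcst_demo + 92.0,
})
print(demo_fcst_frame.head())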
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
| assert_series_equal(expect_out, actual_out, check_names=False) | pandas.util.testing.assert_series_equal |
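# Minimal direct-usage sketch of datacompy.columns_equal as exercised by the
# tests above (assumes the datacompy package is installed). The tolerance and
# the ignore_spaces/ignore_case flags mirror the test cases; data is made up.
import numpy as np
import pandas as pd
import datacompy

a = pd.Series([1.0, 2.0, 3.0, np.nan])
b = pd.Series([1.0, 2.1, 4.0, np.nan])
print(datacompy.columns_equal(a, b, abs_tol=0.2))   # True, True, False, True
s1 = pd.Series(["Hi", "Yo ", "datacompy"])
s2 = pd.Series(["Hi ", "Yo", "DataComPy"])
print(datacompy.columns_equal(s1, s2, ignore_spaces=True, ignore_case=True))   # all True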
from datetime import datetime
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_extract_expand_kwarg_wrong_type_raises(any_string_dtype):
# TODO: should this raise TypeError
values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
with pytest.raises(ValueError, match="expand must be True or False"):
values.str.extract(".*(BAD[_]+).*(BAD)", expand=None)
def test_extract_expand_kwarg(any_string_dtype):
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*")
tm.assert_frame_equal(result, expected)
result = s.str.extract(".*(BAD[_]+).*", expand=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
tm.assert_frame_equal(result, expected)
def test_extract_expand_False_mixed_object():
ser = Series(
["aBAD_BAD", np.nan, "BAD_b_BAD", True, datetime.today(), "foo", None, 1, 2.0]
)
# two groups
result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
er = [np.nan, np.nan] # empty row
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
# single group
result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False)
expected = Series(
["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_extract_expand_index_raises():
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(["A1", "A2", "A3", "A4", "B5"])
msg = "only one regex group is supported with Index"
with pytest.raises(ValueError, match=msg):
idx.str.extract("([AB])([123])", expand=False)
def test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype):
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
# no groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=False)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=False)
def test_extract_expand_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False)
expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype)
if index_or_series == Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
def test_extract_expand_capture_groups(any_string_dtype):
s = Series(["A1", "B2", "C3"], dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=False)
expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=False)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=False)
expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two normal groups, one non-capturing group
s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)
result = s.str.extract("([AB])([123])(?:[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one optional group followed by one normal group
s = Series(["A1", "B2", "3"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group followed by one optional group
s = Series(["A1", "B2", "C"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_capture_groups_index(index, any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/6348
# not passing index to the extractor
data = ["A1", "B2", "C"]
if len(index) < len(data):
pytest.skip("Index too short")
index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
result = s.str.extract(r"(\d)", expand=False)
expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
index=index,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_single_series_name_is_preserved(any_string_dtype):
s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype)
result = s.str.extract(r"(?P<sue>[a-z])", expand=False)
expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_extract_expand_True(any_string_dtype):
# Contains tests like those in test_match and some others.
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_mixed_object():
er = [np.nan, np.nan] # empty row
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_single_capture_group_raises(
index_or_series, any_string_dtype
):
# these should work for both Series and Index
# no groups
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=True)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=True)
def test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True)
expected_dtype = "object" if index_or_series is Index else any_string_dtype
expected = DataFrame({"uno": ["A", "A"]}, dtype=expected_dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("name", [None, "series_name"])
def test_extract_series(name, any_string_dtype):
# extract should give the same result whether or not the series has a name.
s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=True)
expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=True)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=True)
expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
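# Quick sketch of the Series.str.extract behaviour these tests document: a
# single capture group with expand=False returns a Series, while named groups
# become DataFrame columns (NaN where the pattern does not match).
import pandas as pd

s_demo = pd.Series(["A1", "B2", "C3"])
print(s_demo.str.extract(r"([AB])[123]", expand=False))           # A, B, NaN
print(s_demo.str.extract(r"(?P<letter>[AB])(?P<number>[123])"))   # columns: letter, number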
# Concatenate uber1, uber2, and uber3: row_concat
row_concat = pd.concat([uber1, uber2, uber3])
# Print the shape of row_concat
print(row_concat.shape)
# Print the head of row_concat
print(row_concat.head())
# Concatenate ebola_melt and status_country column-wise: ebola_tidy
ebola_tidy = pd.concat([ebola_melt, status_country], axis=1)
# Print the shape of ebola_tidy
print(ebola_tidy.shape)
# Print the head of ebola_tidy
print(ebola_tidy.head())
# Import necessary modules
import glob
import pandas as pd
# Write the pattern: pattern
pattern = '*.csv'
# Save all file matches: csv_files
csv_files = glob.glob(pattern)
# Print the file names
print(csv_files)
# Load the second file into a DataFrame: csv2
csv2 = pd.read_csv(csv_files[1])
# Print the head of csv2
print(csv2.head())
# Create an empty list: frames
frames = []
# Iterate over csv_files
for csv in csv_files:
# Read csv into a DataFrame: df
df = pd.read_csv(csv)
# Append df to frames
frames.append(df)
# Concatenate frames into a single DataFrame: uber
uber = pd.concat(frames)
# Print the shape of uber
print(uber.shape)
# Print the head of uber
print(uber.head())
# Merge the DataFrames: o2o
o2o = pd.merge(left=site, right=visited, left_on="name", right_on="site")
# Print o2o
print(o2o)
# Merge the DataFrames: m2o
m2o = | pd.merge(left=site, right=visited, left_on="name", right_on="site") | pandas.merge |
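# Self-contained sketch of the two patterns used above: row-wise pd.concat
# (ignore_index=True gives the stacked frame a fresh index) and a key-based
# pd.merge. The small frames below are stand-ins for uber/site/visited.
import pandas as pd

part1 = pd.DataFrame({"ride": [1, 2], "fare": [7.5, 9.0]})
part2 = pd.DataFrame({"ride": [3], "fare": [4.25]})
stacked = pd.concat([part1, part2], ignore_index=True)
print(stacked.shape)   # (3, 2)
site_demo = pd.DataFrame({"name": ["DR-1", "DR-3"], "lat": [-49.85, -47.15]})
visited_demo = pd.DataFrame({"site": ["DR-1", "DR-3", "DR-3"],
                             "dated": ["1927-02-08", "1930-01-12", "1939-01-07"]})
m2o_demo = pd.merge(left=site_demo, right=visited_demo, left_on="name", right_on="site")
print(m2o_demo.shape)  # (3, 4): many-to-one merge keeps every visit row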
def grid_scanfish_wrapper(cast_as_ds,dx=500,dz=10,d_factor=500):
import glob
import os
import numpy as np
import xarray as xr
import pandas as pd
import gsw
# use scipy.interpolate.Rbf to interpolate to grid; use d_factor to put more weight on values on x-axis, and not z-axis
#d_factor = 500
#dx = 300 # m
#dz = 5 # m
xi,yi,ti,CT_grid,cast_as_ds_corrected = gridding_scanfish_data(cast_as_ds,varname='CT', dx=dx/d_factor,dz=dz,d_factor=d_factor)
xi,yi,ti,SA_grid,cast_as_ds_corrected = gridding_scanfish_data(cast_as_ds,varname='SA', dx=dx/d_factor,dz=dz,d_factor=d_factor)
# xi,yi,sigma_0_grid,cast_as_ds_corrected = gridding_scanfish_data(cast_as_ds,varname='sigma_0',dx=dx/d_factor,dz=dz,d_factor=d_factor)
p_ref = 10.1325; # reference pressure # following scanfish calc
Pot_dens = gsw.rho(SA_grid,CT_grid,p_ref)
sigma_0_grid = Pot_dens - 1000;
# convert to Dataset
gridded_as_ds = xr.Dataset({'CT': (['z', 'time'], CT_grid ),'SA': (['z', 'time'], SA_grid ),'sigma_0': (['z', 'time'], sigma_0_grid )},
coords={'distance':(['z', 'time'], xi ),'z': (['z'], yi[:,0] ),'time':(['time'], ti[0,:] )})
return gridded_as_ds, cast_as_ds_corrected
def load_process_scanfish_data(pathData):
import glob
import os
import numpy as np
import xarray as xr
import pandas as pd
import gsw
# load scanfish
#pathData = r'/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/Scanfish/' # use your path
data_files = glob.glob(os.path.join(pathData, "Scanfish_complete_cast*.nc")) # advisable to use os.path.join as this makes concatenation OS independent
#from scanfish_functions import gridding_scanfish_data
scanfish_data, scanfish_gridded= [], []
for ti,f in zip(range(len(data_files[0:3:2])),data_files[0:3:2]): # skipping second transect as it doesn't cross filament
cast_as_ds = xr.open_dataset(f,decode_times=False)
# Convert time axis to date form:
cast_as_ds.time.values = | pd.to_datetime(cast_as_ds.time.values-719529, unit='D') | pandas.to_datetime |
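# Isolated sketch of the timestamp conversion on the line above: MATLAB
# datenums count days from year 0, so subtracting 719529 (the datenum of
# 1970-01-01) and interpreting the remainder as days yields POSIX datetimes.
import numpy as np
import pandas as pd

matlab_datenum = np.array([738000.0, 738000.5])                  # example datenums
py_times = pd.to_datetime(matlab_datenum - 719529, unit="D")     # timestamps half a day apart
print(py_times)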
import os
import urllib
import requests
import zipfile
import pandas as pd
import time
from datetime import datetime
from google.transit import gtfs_realtime_pb2
def RequestsWrite(APIkey, feed_id):
'''
    This function takes APIkey and feed_id as inputs, then
    Requests MTA subway real-time status and Writes a gtfs file.
'''
url = 'http://datamine.mta.info/mta_esi.php?key=' + APIkey + '&feed_id=' + str(feed_id)
response = requests.get(url)
feed = gtfs_realtime_pb2.FeedMessage()
feed.ParseFromString(response.content)
timestamp = datetime.fromtimestamp(feed.header.timestamp)
FolderName = '%04d'%(timestamp.year) + '%02d'%(timestamp.month) + '/' \
+ '%04d'%(timestamp.year) + '%02d'%(timestamp.month) + '%02d'%(timestamp.day)
if not os.path.isdir(FolderName):
os.makedirs(FolderName)
file = open(FolderName + "/gtfs_" + str(feed_id) + '_' + str(timestamp).replace(" ", "-").replace(":", "-") + ".gtfs", "wb")
file.write(response.content)
file.close()
def collect(APIkey):
'''
    This function takes APIkey as an input, and
    keeps requesting real-time status for all MTA subway lines, writing gtfs files.
Note: this is an endless loop; to stop this program in terminal, please press Control + C.
'''
feed_id_lines = {'1': '123456S',
'26': 'ACEHS',
'16': 'NQRW',
'21': 'BDFM',
'2': 'L',
'11': 'SIR',
'31': 'G',
'36': 'JZ',
'51': '7'
}
var = 1
    while var <= 1: # every loop takes about 1 second for all 9 feed_ids
for feed_id in feed_id_lines.keys():
try:
RequestsWrite(APIkey, feed_id)
except:
continue
time.sleep(3)
# 15 seconds per update on average (confidence interval between 5 to 30 sec);
# 3 seconds sleep (totally less than 5 seconds) for each loop to make sure the data integrity.
def arrival(date):
'''
    This function takes date (e.g. 20190901) as an input,
    structures and integrates the gtfs files in the corresponding folder,
    and outputs an arrival csv file.
'''
errornum = 0
year, month, day = date[:4], date[4:6], date[6:8]
FolderPath = year + month + '/' + year + month + day
gtfsFileNames = os.listdir(FolderPath)
dict1 = {}
for gtfsFileName in gtfsFileNames:
gtfsFilePath = FolderPath + '/' + gtfsFileName
try:
# read in gtfs
f = open(gtfsFilePath, 'rb')
raw_str = f.read()
msg = gtfs_realtime_pb2.FeedMessage()
msg.ParseFromString(raw_str)
# structure gtfs
gtfs_timestamp = msg.header.timestamp
for i,entity in enumerate(msg.entity):
if entity.HasField('trip_update'):
try:
arrival_time = entity.trip_update.stop_time_update[0].arrival.time
stop_id = entity.trip_update.stop_time_update[0].stop_id
trip_id = entity.trip_update.trip.trip_id
route_id = entity.trip_update.trip.route_id
entity2 = msg.entity[i+1]
current_stop_sequence = entity2.vehicle.current_stop_sequence
current_status = entity2.vehicle.current_status
vehicle_timestamp = entity2.vehicle.timestamp
vehicle_stop_id = entity2.vehicle.stop_id
dict0 = {'gtfs_timestamp': gtfs_timestamp,
'trip_id': trip_id,
'arrival_time': arrival_time,
'stop_id': stop_id,
'route_id': route_id,
'current_stop_sequence': current_stop_sequence,
'current_status': current_status,
'vehicle_timestamp': vehicle_timestamp,
'vehicle_stop_id': vehicle_stop_id
}
try: # update record
if gtfs_timestamp >= dict1[trip_id+ '//' + stop_id]['gtfs_timestamp']:
dict1[trip_id+ '//' + stop_id] = dict0
except: # add new record
dict1[trip_id+ '//' + stop_id] = dict0
except:
continue
except: # DecodeError: Error parsing message
errornum+=1
continue
print("%d GTFS files cannot be parsed"%errornum)
df = pd.DataFrame.from_dict(dict1).T.reset_index(drop=True)
df.to_csv(year + month + '/arrival_' + year + month + day + '.csv')
def delay(date, date_schedule = 'latest'):
'''
This function takes date as an input,
    calculates delays from actual arrivals and schedules,
    and outputs a delay csv file.
    ** requires actual arrival/schedule csv files in the corresponding folder.
'''
year, month, day = date[:4], date[4:6], date[6:8]
#################### Actual Arrival ####################
df = pd.read_csv(year + month + '/arrival_' + year + month + day + '.csv', index_col=0)
df.arrival_time = df.arrival_time.apply(lambda x: datetime.fromtimestamp(x))
df.gtfs_timestamp = df.gtfs_timestamp.apply(lambda x: datetime.fromtimestamp(x))
df.vehicle_timestamp = df.vehicle_timestamp.apply(lambda x: datetime.fromtimestamp(x))
weekday_dict = {0:'Weekday', 1:'Weekday', 2:'Weekday', 3:'Weekday', 4:'Weekday', 5:'Saturday', 6:'Sunday'}
df['weekday'] = df.arrival_time.apply(lambda x: weekday_dict[x.weekday()])
df['trip_id2'] = df['weekday'] + '-' + df['trip_id']
df['match_id'] = df['trip_id2'] + '//' + df['stop_id'].astype(str)
#################### Schedules ####################
# download schedule gtfs file
# schedule default setting is the latest; for historical schedule, please refer to: https://transitfeeds.com/p/mta/79
FolderName = 'schedule_' + date_schedule
FileName = FolderName + ".zip"
url = 'https://transitfeeds.com/p/mta/79/' + date_schedule + '/download'
if not os.path.isdir(FolderName):
os.makedirs(FolderName)
urllib.request.urlretrieve(url, FileName)
with zipfile.ZipFile(FileName, 'r') as zip_ref:
zip_ref.extractall(FolderName)
stop_times = pd.read_csv(FolderName + "/stop_times.txt")
stop_times['weekday'] = stop_times.trip_id.apply(lambda x: x.split('-')[-2]) # perfectly extracted
stop_times['weekday'] = stop_times['weekday'].apply(lambda x: 'Weekday' if x == 'Wednesday' else x) # L train does not have Weekday! Only ['Saturday', 'Sunday', 'Wednesday']
stop_times['num_id'] = stop_times.trip_id.apply(lambda x: x.split('_')[1]) # perfectly extracted, fixed lenth
stop_times['direction'] = stop_times['trip_id'].apply(lambda x: x.split('-')[-1].split('.')[-1][0])
stop_times['route_id'] = stop_times['trip_id'].apply(lambda x: x.split('_')[-1].split('.')[0])
stop_times['trip_id2'] = stop_times['weekday'] + '-' + stop_times['num_id'] + '_' + stop_times['route_id'] + '..' + stop_times['direction']
stop_times['match_id'] = stop_times['trip_id2'] + '//' + stop_times['stop_id']
stop_times.drop(['stop_headsign', 'pickup_type', 'drop_off_type', 'shape_dist_traveled'], axis=1, inplace=True)
stop_times.rename(columns={'arrival_time':'arrival_time_scheduled', 'departure_time':'departure_time_scheduled'}, inplace=True)
# conver timestamp
    stop_times = stop_times[stop_times.arrival_time_scheduled.apply(lambda x: int(x[:2])<24)] # drop abnormal arrival_time values whose hour field is 24 or greater
    stop_times = stop_times[stop_times.arrival_time_scheduled.apply(lambda x: int(x[3:5])<60)] # drop abnormal arrival_time values whose minutes field is 60 or greater
stop_times.arrival_time_scheduled = pd.to_datetime(stop_times.arrival_time_scheduled.apply(lambda x: year + '-' + month + '-' + day + '-' + x))
#################### Match ####################
df_match = | pd.merge(df, stop_times[['match_id', 'arrival_time_scheduled', 'departure_time_scheduled']], on='match_id', how='inner') | pandas.merge |
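    #################### Delay (hedged sketch) ####################
    # The remainder of this function is not shown in this excerpt; a plausible
    # continuation, implied by the docstring above, is to compute the delay in
    # minutes and write it out. The 'delay_min' column name and the output file
    # name below are assumptions, not taken from the original source.
    df_match['delay_min'] = (
        (df_match['arrival_time'] - df_match['arrival_time_scheduled'])
        .dt.total_seconds() / 60.0
    )
    df_match.to_csv(year + month + '/delay_' + year + month + day + '.csv')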
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 29 11:20:57 2017
@author: James
"""
from xgboost import XGBRegressor, XGBClassifier
from sklearn.model_selection import cross_val_predict
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pickle
from sklearn import metrics
#Load main data
data = | pd.read_csv("filtered_background.csv",encoding="utf-8") | pandas.read_csv |
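# Hedged sketch (the 'label' column name and cv=5 are placeholders, not taken
# from filtered_background.csv): the imports above are commonly wired together
# as scale -> (optionally PCA) -> out-of-fold prediction with XGBoost, roughly:
#     X = MinMaxScaler().fit_transform(data.drop(columns=["label"]))
#     y = data["label"]
#     preds = cross_val_predict(XGBClassifier(), X, y, cv=5)
#     print(metrics.accuracy_score(y, preds))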
# coding=utf-8
"""
Log based system ID
"""
from typing import Dict, List
import control
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyulog
import scipy.optimize
import scipy.signal as sig
import ulog_tools as ut
# pylint: disable=no-member, invalid-name
def ulog_to_dict(log: pyulog.ULog) -> Dict:
"""Convert ulog to a dict"""
res = {}
for topic in log.data_list:
types = {}
for field in topic.field_data:
types[field.field_name] = field.type_str
index = pd.TimedeltaIndex(data=np.array(topic.data['timestamp']), unit='us')
cols = []
data = []
for k in sorted(topic.data.keys()):
cols.append('f_{:s}'.format(k.replace('[', '_').replace(']', '')))
data.append(topic.data[k])
data = np.array(data).T
df = pd.DataFrame(data, index=index, columns=cols)
res['t_{:s}_{:d}'.format(topic.name, topic.multi_id)] = df
return res
def series_diff(series: pd.Series, order: int=1) -> pd.Series:
"""Derivative of a series"""
dx = np.gradient(series.values, order)
dt = np.gradient(series.index, order)
dxdt = np.zeros(dx.shape)
for i, dt_i in enumerate(dt):
if dt_i <= 0:
dxdt[i] = 0
else:
dxdt[i] = dx[i] / dt[i]
return | pd.Series(data=dxdt, index=series.index) | pandas.Series |
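# Quick illustrative check of series_diff (made-up values, not from a real log):
# for x(t) = t**2 sampled at t = 0, 1, 2, 3 the central-difference slope
# [1, 2, 4, 5] is returned.
_series_diff_demo = series_diff(
    pd.Series([0.0, 1.0, 4.0, 9.0], index=[0.0, 1.0, 2.0, 3.0])
)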
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 14:18:15 2017
@author: 53771
"""
import pandas as pd
import tushare as ts
import fileInfo as fi
import numpy as np
def save_hist_data(code):
data=ts.get_k_data(code,start='2011-01-01')
data.to_csv("./stock/"+code+'.csv')
#data.index=pd.to_datetime(data.index)
#data.sort_index(inplace=True)
#return data
def read_hit_data(code):
try:
df=pd.read_csv('./stock/'+code+'.csv',index_col='date')
df.index=pd.to_datetime(df.index)
df.sort_index(inplace=True)
except IOError:
save_hist_data(code)
df=pd.read_csv('./stock/'+code+'.csv',index_col='date')
df.index= | pd.to_datetime(df.index) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 2020-11-16
Code for Figure 6
Code runs multiple evolution runs; each run is stored on disk independently
Use mlsFig_evolutionFitnessLandscape with same settings to create reference parameter space scans
@author: simonvanvliet
<EMAIL>
"""
import sys
sys.path.insert(0, '..')
from mainCode import MlsGroupDynamics_evolve as mls
from mainCode import MlsGroupDynamics_utilities as util
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
"""
SET SETTINGS
"""
#SET fileName is appended to file name
fileName = 'evolutionRun'
#SET number of cores to use
numCore = 20
#SET group fission rates to scan
gr_Sfission_Vec = np.array([0.1, 4]) # for S = 0, use K = 2E5; for S > 0 (0.1 or 4), use K = 3E4
#SET parName and par0Vec to scan over any parameter of choice
par0Name = 'indv_NType'
par0Vec = np.array([1, 2])
#SET initial locations of evolution runs
init_Aray = np.array([[0.05,0.05],[0.05,0.5],[0.05,0.95],[0.25,0.5],[0.45,0.5]])
numInit = init_Aray.shape[0]
#SET Model default settings
model_par = {
#time and run settings
"maxPopSize": 0,
"maxT": 1E6, # total run time
"sampleInt": 1E3, # sampling interval
"mav_window": 1E4, # average over this time window
"rms_window": 1E4, # calc rms change over this time window
# settings for initial condition
"init_groupNum": 100, # initial # groups
"init_fCoop": 1,
"init_groupDens": 50, # initial total cell number in group
# settings for individual level dynamics
# complexity
"indv_NType": 1,
"indv_asymmetry": 1, # difference in growth rate b(j+1) = b(j) / asymmetry
# mutation load
"indv_cost": 0.01, # cost of cooperation
"indv_migrR": 0, # migration rate
# set mutation rates
'mutR_type': 1E-3, # mutation rate between cooperator and cheater
    'mutR_size': 1E-2, # mutation rate in offspring size trait value
    'mutR_frac': 1E-2, # mutation rate in offspring fraction trait value
    'indv_tau' : 1, # multiplies individual rates
# group size control
"indv_K": 100, # total group size at EQ if f_coop=1
"delta_indv": 1, # zero if death rate is simply 1/k, one if death rate decreases with group size
# setting for group rates
# fission rate
'gr_CFis': 0.01,
'gr_SFis': 0, # measured in units of 1 / indv_K
'grp_tau': 1, # constant multiplies group rates
# extinction rate
    'delta_grp': 0, # exponent of density dependence on group #
'K_grp': 0, # carrying capacity of groups
    'delta_tot': 1, # exponent of density dependence on total # individuals
'K_tot': 1E4, # carrying capacity of total individuals.
'delta_size': 0, # exponent of size dependence
# initial settings for fissioning
'offspr_sizeInit': 0.25, # offspr_size <= 0.5 and
    'offspr_fracInit': 0.5 # offspr_size < offspr_frac < 1-offspr_size
}
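# Hedged sketch (the actual sweep/driver code lies outside this excerpt): the
# settings above are typically expanded into one model_par dict per
# (par0, gr_SFis, initial-location) combination and dispatched with joblib, e.g.
#   Parallel(n_jobs=numCore)(delayed(run_single)(p, fileName) for p in sketch_par_list())
def sketch_par_list():
    # Illustrative only; parameter names follow the settings defined above.
    parList = []
    for par0 in par0Vec:
        for sFis in gr_Sfission_Vec:
            for ii in range(numInit):
                par = dict(model_par)
                par[par0Name] = par0
                par['gr_SFis'] = sFis
                par['offspr_sizeInit'] = init_Aray[ii, 0]
                par['offspr_fracInit'] = init_Aray[ii, 1]
                parList.append(par)
    return parList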
"""
Function definitions
"""
def run_single(model_par, mainName):
parNameAbbrev = {
'delta_indv' : 'dInd',
'delta_grp' : 'dGrp',
'delta_tot' : 'dTot',
'delta_size' : 'dSiz',
'gr_CFis' : 'fisC',
'gr_SFis' : 'fisS',
'indv_NType' : 'nTyp',
'indv_asymmetry': 'asym',
'indv_cost' : 'cost',
'mutR_type' : 'muTy',
'mutR_size' : 'muSi',
'mutR_frac' : 'muFr',
'indv_migrR' : 'migR',
'indv_K' : 'kInd',
'K_grp' : 'kGrp',
'K_tot' : 'kTot',
'offspr_sizeInit':'siIn',
'offspr_fracInit':'frIn',
'indv_tau' : 'tInd'}
parListName = ['indv_NType',
'gr_SFis',
'offspr_sizeInit',
'offspr_fracInit']
parName = ['_%s%.0g' %(parNameAbbrev[x], model_par[x]) for x in parListName]
parName = ''.join(parName)
fileName = mainName + parName + '.npz'
fileNamePkl = mainName + parName + '.pkl'
#run model and save data to disk
try:
outputMat, traitDistr = mls.run_model(model_par)
np.savez(fileName, output=outputMat, traitDistr=traitDistr, model_par=[model_par])
try:
df = | pd.DataFrame.from_records(outputMat) | pandas.DataFrame.from_records |
import pandas as pd
l = pd.DataFrame({
"id": [0, 1, 3, 4],
"A": ['a', 'b', 'c', 'd'],
"B": ['e', 'f', 'g', 'h'],
})
r = pd.DataFrame({
"id": [0, 1, 3, 4],
"B": ['e', 'f', 'z', 'h'],
"C": ['i', 'j', 'k', 'l'],
"d": ['m', 'n', 'o', 'p'],
})
# when merging on="id" the columns are sorted; without on= the merge is done on the indices (which are identical here by default)
df = pd.merge(l, r, on="id")
df2 = pd.merge(l, r)
df3 = pd.merge(l, r, on="id", how='outer')
# joining on multiple columns is also possible -> pass a list
# parameters for how:
# inner: only matching rows are kept
# outer: all rows are kept and missing values are filled with NaN
# left: outer left
# right: outer right
# indicator adds extra info in a new '_merge' column (demonstrated below)
df4 = pd.merge(l, r, on=["id",'B'], how='outer')
df5 = | pd.merge(l, r, on=["id",'B'], how='inner') | pandas.merge |
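# The indicator option mentioned above, demonstrated on the same frames: the
# extra '_merge' column reports whether each row came from the left frame only,
# the right frame only, or both.
df6 = pd.merge(l, r, on=["id", 'B'], how='outer', indicator=True)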
from .CoreClasses import *
from .InitializeFunctions import *
import numpy as np
import time
import re
import random
import os
import pickle
import sys
import copy
import math
### DEPRECATED!!!!
def check_mass(original_mass, CRS, concentrations):
''' Checks conservation of mass
    Arguments
- original_mass: integer mass of the original system
- CRS: CRS object
- concentrations: array of molecule abundances
'''
print(concentrations)
mass_conserved = False
test_mass = 0.0
molecules = CRS.molecule_list
for m in molecules:
index = molecules.index(m)
molecule_size = len(m)
molecule_count = np.sum(concentrations[:, :, index])
mass = molecule_count*molecule_size
test_mass += mass
if test_mass == original_mass:
mass_conserved = True
return mass_conserved, test_mass
def calculate_mass_fraction_by_composition(concentrations, CRS, total_mass):
    ''' Takes an abundance distribution and returns a mass fraction distribution (a dictionary: keys are compositions, values are mass fractions) '''
molecules = CRS.molecule_list
mass_fraction = {}
for m in molecules:
index = molecules.index(m)
comp = get_composition(m)
molecule_size = len(m)
molecule_count = float(np.sum(concentrations[:,:,index]))
mass = molecule_count*molecule_size
if comp in mass_fraction.keys():
mass_fraction[comp] += float(mass)/float(total_mass)
else:
mass_fraction[comp] = float(mass)/float(total_mass)
return mass_fraction
def calculate_molecule_fraction_by_composition(concentrations, CRS, total_mass):
    ''' Takes an abundance distribution and returns a molecule fraction distribution (a dictionary: keys are compositions, values are molecule fractions) '''
molecules = CRS.molecule_list
molecule_fraction = {}
total_molecules = 0.0
for m in molecules:
index = molecules.index(m)
comp = get_composition(m)
molecule_size = len(m)
molecule_count = float(np.sum(concentrations[:,:,index]))
total_molecules += molecule_count
#mass = molecule_count
if comp in molecule_fraction.keys():
molecule_fraction[comp] += float(molecule_count)
else:
molecule_fraction[comp] = float(molecule_count)
for comp in molecule_fraction.keys():
molecule_fraction[comp] = float(molecule_fraction[comp])/float(total_molecules)
return molecule_fraction
def get_aa(comp):
''' Returns the amino acids in a given composition string '''
aa = []
coef = re.findall(r'\d+', comp)
for i in range(len(coef)):
comp = comp.replace(coef[i], ' ')
aa = comp.split()
return aa
def get_reaction_constants(CRS):
    ''' Returns a dict mapping reaction IDs to rate constants for a CRS '''
constants = {}
for rID in range(len(CRS.reactions)):
if sum(CRS.reactions[rID].product_coeff) == 1:
rxn = CRS.reactions[rID]
constants[rID] = rxn.constant
return constants
def generate_random_distribution(CRS, total_mass, N_L = 1):
''' Generates a random distribution of molecules for a given CRS '''
molecules = CRS.molecule_list
concentrations = np.zeros( (N_L, N_L, len(molecules)) )
molecule_dict = CRS.molecule_dict
mass = total_mass
monomers_in_seqs = {}
while mass > 0.1*(total_mass):
m= random.choice(molecules)
mID = molecule_dict[m]
concentrations[0,0, mID] += 1
comp = get_composition(m)
        coef = list(map(int, re.findall(r'\d+', comp)))  # list() so the coefficients can be summed and indexed below
mass -= sum(coef)
aa = get_aa(comp)
for a in range(len(aa)):
if aa[a] in monomers_in_seqs.keys():
monomers_in_seqs[aa[a]] += coef[a]
else:
monomers_in_seqs[aa[a]] = coef[a]
diff = total_mass- mass
    monomer_names = list(monomers_in_seqs.keys())  # list() so the keys can be indexed
concentrations[0,0,0] += int( (monomers_in_seqs[monomer_names[0]] - monomers_in_seqs[monomer_names[1]])/(2.0) + (diff/2.0) )
concentrations[0,0,1] += int( (monomers_in_seqs[monomer_names[1]] - monomers_in_seqs[monomer_names[0]])/(2.0) + (diff/2.0) )
return concentrations
def set_reaction_constants(original_CRS, new_constants ):
''' Sets the reaction constants in the CRS to new_constants
Returns the CRS with updated constants'''
newCRS = copy.deepcopy(original_CRS)
for rID in new_constants.keys():
rxn = newCRS.reactions[rID]
rxn.constant = copy.deepcopy(new_constants[rID])
newCRS.reactions[rID] = rxn
return newCRS
def get_composition(seq):
    '''Gets the composition of the seq. Returns a string which contains the stoichiometry of the seq. Monomers are sorted alphabetically. '''
comp = ''
monomers = sorted(list(set(seq)))
for m in monomers:
comp+= m +str(seq.count(m))
return comp
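# Illustrative examples: get_composition('ABAB') returns 'A2B2' and
# get_composition('GG') returns 'G2'.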
def mutate(float_vec, mu, epsilion, as_percentage = True, targets = None):
''' Mutates a vector of floating point numbers. Mu is the fraction of numbers to be changed.
Half of the changes result in the number being replaced by another number in the vector
The remaining half of changes will cause small change to the number
if as_percentage is True, the change will be scaled by the size of the original number '''
if targets ==None:
new_vec = {}
nV = len(float_vec.values())
nR = int(np.floor(0.25*nV*mu))
nM = nV - nR
        mutations = np.random.choice(list(float_vec.keys()), size= (nM), replace = False)
        replacements = np.random.choice(list(float_vec.keys()), size= (nR), replace = False)
for i in mutations:
# Mutate
if as_percentage == True:
delta = np.random.normal(0,float_vec[i]*epsilion)
new_vec[i] = float_vec[i] + delta
new_vec[i] = abs(new_vec[i])
else:
new_vec[i] = float_vec[i] + np.random.normal(0, epsilion)
new_vec[i] = abs(new_vec[i])
for i in replacements:
# Replace
new_vec[i] = float_vec[np.random.choice(list(float_vec.keys()), size = 1)[0]]
return new_vec
else:
new_vec = {}
nV = len(targets)
nR = int(np.floor(0.5*nV))
nM = nV - nR
        mutations = np.random.choice(list(float_vec.keys()), size= (nM), replace = False)
        replacements = np.random.choice(list(float_vec.keys()), size= (nR), replace = False)
for i in mutations:
# Mutate
if as_percentage == True:
delta = np.random.normal(0,float_vec[i]*epsilion)
new_vec[i] = float_vec[i] + delta
new_vec[i] = abs(new_vec[i])
else:
new_vec[i] = float_vec[i] + np.random.normal(0, epsilion)
new_vec[i] = abs(new_vec[i])
for i in replacements:
# Replace
new_vec[i] = float_vec[np.random.choice(list(float_vec.keys()), size = 1)[0]]
return new_vec
def generate_concentrations_from_data(mass_fraction, CRS, total_mass, monomer_fraction = 0.25, N_L = 1):
''' Generates a concentration array from a given mass fraction
Assumes that the mass of a given composition is distributed evenly
Assumes that the mass of monomers is monomer_fraction of the total_mass'''
# Determine the fraction of the system in monomers
monomer_mass = np.floor(total_mass*monomer_fraction)
#print "Monomer Mass", monomer_mass
oligmer_mass = total_mass - monomer_mass
#print 'oligmer Mass', oligmer_mass
nM = len(CRS.molecule_list)
concentrations = np.zeros((N_L, N_L, nM), dtype = int)
m_dict = CRS.molecule_dict
molecules = CRS.molecule_list
new_mass = 0.0
comp_dict = {} # Maps compositions to lists of molecules
num_monomers = 0
### Iterate over all molecules
for m in molecules:
## If it's a monomer take note
if len(m) == 1:
num_monomers += 1
# Get the composition, and record which molecules are associated with that composition
comp = get_composition(m)
#print comp
if comp in comp_dict.keys():
comp_dict[comp].append(m)
else:
comp_dict[comp] = [m]
#raw_input("Enter to continue, The composition of All molecules has been recorded")
### For all compositions distribute the mass equally
monomers_in_seqs = [0,0]
for comp in comp_dict.keys():
nM = len(comp_dict[comp])
frac = 1.0/float(nM)
### If the composition was contained in the data
if comp in mass_fraction.keys():
comp_mass = mass_fraction[comp] # Determine the mass of that compostion
            coef = list(map(int, re.findall(r'\d+', comp))) # Determine the length (list() so it can be summed and indexed below)
l = sum(coef)
particles_per_sequence = frac*float(oligmer_mass*comp_mass)/float(l)
for m in comp_dict[comp]:
## FOr each molecule with that composition
#print m
index = CRS.molecule_dict[m]
concentrations[0,0, index] = int(particles_per_sequence)
for c in range(len(coef)):
monomers_in_seqs[c] += int(coef[c]*particles_per_sequence)
new_mass += int(particles_per_sequence)*int(l)
oligomer_mass = sum(monomers_in_seqs)
diff = int(total_mass - oligmer_mass)
monomers = [total_mass/2.0, total_mass/2.0]
monomers = np.array(monomers)- np.array(monomers_in_seqs)
for i in range(2):
concentrations[0,0,i] += monomers[i]
return concentrations
def generate_seq_peptides_CRS(amino_acids, max_length, kl = 0.0001, kd = 1.0, catalysts = False):
    ''' Generates all possible reactions between peptide molecules up to size max_length and assigns all reactions the same constant and standard propensity
    Arguments:
- amino_acids: the amino acids used in the peptides
- max_length: the maximum length of peptides
- kl: reaction rate constant for forward reactions
- kd: reaction rate constant for reverse reactions, if None given, it will be assigned to be equal to kl
- catalysts: Boolean, whether or not catalysts should be included
'''
import itertools
rxn_IDs = []
reaction_list = []
molecule_list = []
molecule_dict = {}
rxn_ID = 0
#molecule_ID = 0
# If the backward constant is not specified, make it equal the forward constant
if kd == None:
kd = kl
for l in range(1,max_length+1):
# Generate all sequences of length l+1
sequences = itertools.product(amino_acids, repeat = l)
for seq in sequences:
s = ''.join(seq)
molecule_list.append(s)
molecule_dict[s] = molecule_list.index(s)
#print s
for i in range(1,l):
# Forward Reaction
rxn_ID = len(reaction_list)
reactants = [ molecule_dict[s[:i]], molecule_dict[s[i:]] ]
reactant_coeff = [1,1]
product_coeff = [1]
products = [ molecule_dict[s] ]
reaction_list.append( Reaction(rxn_ID, reactants = reactants, reactant_coeff = reactant_coeff , products = products, product_coeff = product_coeff, constant = np.random.normal(kl, 0.05*kl), prop = 'STD') )
#print 'Reaction List index: ', rxn_ID, 'Reaction ID: ', reaction_list[rxn_ID].ID
rxn_IDs.append(rxn_ID)
# Backward Reaction
rxn_ID = len(reaction_list)
                reaction_list.append( Reaction(rxn_ID, products = reactants, product_coeff = reactant_coeff, reactants = products, reactant_coeff = product_coeff, constant = kd, prop = 'STD') )  # the backward reaction has the single product as its reactant, so its coefficient list is product_coeff
#print 'Reaction List index: ', rxn_ID, 'Reaction ID: ', reaction_list[rxn_ID].ID
rxn_IDs.append(rxn_ID)
#print molecule_dict
newCRS = CRS(molecule_list = molecule_list, molecule_dict = molecule_dict, reactions = reaction_list)
#print newCRS.molecule_dict
return newCRS
def compare_distributions(target, current):
''' Compares two different mass distributions based on their euclidean distance '''
t = []
c = []
for i in current.keys():
t.append(target[i])
c.append(current[i])
dist = 0.0
for i in range(len(c)):
dist += (c[i]- t[i])**2
return np.sqrt(dist)
def compare_distributions_AE(target, current):
''' Compares two different mass distributions based on their Absolute difference '''
t = []
c = []
dist = 0.0
diffs = []
for i in current.keys():
diffs.append ( (i, target[i]-current[i] ) )
#if target[i] != 0.0 and current[i] != 0.0:
dist += abs(target[i]-current[i])
return dist
def compare_distributions_AE_targeted(target, current):
''' Compares two different mass distributions based on their Absolute difference, weighted by the value of the target '''
dist = 0.0
for i in current.keys():
#if target[i] != 0.0 and current[i] != 0.0:
dist += target[i]*abs(target[i]-current[i])
return dist
def mass_fraction_to_length_distribution(mass_fraction):
'''Converts a mass fraction dictionary to a length dictionary '''
length_dist = {}
for comp in mass_fraction.keys():
coef = map(int, re.findall(r'\d+', comp))
l = sum(coef)
if l in length_dist.keys():
length_dist[l] += mass_fraction[comp]
else:
length_dist[l] = mass_fraction[comp]
return length_dist
def plot_length_dist(length_dist):
import matplotlib.pylab as plt
import scipy.stats as stats
l = []
f = []
max_l = max(length_dist.keys())
for i in range(1,max_l+1):
l.append(i)
f.append(length_dist[i])
(m,b, r, p, std) = stats.linregress(l,np.log(f))
test_line = [np.exp(m*i + b) for i in l]
plt.plot(l, f)
plt.plot(test_line)
#plt.yscale('log')
print(m, b)
plt.show()
def EICname_to_composition(name):
s = re.split('-', name)
comp = get_composition(s[0])
return comp
def load_affinity_data_normalized(fname, max_length):
import pandas as pd
### Read File
affinity = {}
affinity_df = pd.read_csv(fname, index_col = 0)
### Drop peptides that were never observed
header_names = list(affinity_df)
molecule_dict= {} ### This will map the values from the EIC data to composition codes
molecule_names = list(affinity_df[header_names[1]])## Names of molecules from EIC data
total_intensity = 0.0
for index, row in affinity_df.iterrows():
comp = EICname_to_composition(row[header_names[1]])
coef = map(int, re.findall(r'\d+', comp))
l = sum(coef)
if l <= max_length:
molecule_dict[row[header_names[1]]] = comp
if comp in affinity.keys():
affinity[comp] += (row['A.RT'])
else:
affinity[comp] = (row['A.RT'])
total_intensity += (row['A.RT'])
for comp in affinity.keys():
affinity[comp] = affinity[comp]/float(total_intensity)
# print sum(mass_fraction.values())
# print mass_fraction.keys()
return affinity
def load_EIC_data_as_composition_data(fname, max_length):
''' Load EIC data from APS system into a mass fraction dictionary
This process contains LOTS OF ASSUMPTIONS
ASSUMPTION LIST:
- Hydrated/ cyclic peptides are the same as linear '''
import pandas as pd
mass_fraction = {}
### Read File
EIC_df = pd.read_csv(fname)
### Drop peptides that were never observed
EIC_df = EIC_df[EIC_df['EIC integral'] != 0.0]
header_names = list(EIC_df)
molecule_dict= {} ### This will map the values from the EIC data to composition codes
molecule_names = list(EIC_df[header_names[0]])## Names of molecules from EIC data
total_intensity = 0.0
for index, row in EIC_df.iterrows():
# if 'H2O' not in row[header_names[0]]:
comp = EICname_to_composition(row[header_names[0]])
coef = map(int, re.findall(r'\d+', comp))
l = sum(coef)
if l <= max_length:
molecule_dict[row[header_names[0]]] = comp
if comp in mass_fraction.keys():
mass_fraction[comp] += row['EIC integral']
else:
mass_fraction[comp] = row['EIC integral']
total_intensity += row['EIC integral']
for comp in mass_fraction.keys():
mass_fraction[comp] = mass_fraction[comp]/float(total_intensity)
# print sum(mass_fraction.values())
# print mass_fraction.keys()
return mass_fraction
def find_rxns(target, CRS):
rIDs = []
for t in target:
for rxn in CRS.reactions:
products = rxn.products
if len(products) == 1:
seqs = [CRS.molecule_list[p] for p in products]
comps = [get_composition(s) for s in seqs]
if t in comps:
rIDs.append(rxn.ID)
return rIDs
def update_temp(Tmax, t):
return Tmax*(1.0 - np.sqrt(t))
def generate_flat_mass_frac(target):
''' Generate a flat mass distribution of the same dimensions as the target '''
flat = {}
num_comps = len(target.keys())
flat_value = 1.0/num_comps
for comp in target.keys():
flat[comp] = flat_value
return flat
def load_integrated_EIC_heatmap(fname):
import pandas as pd
input_df = | pd.read_csv(fname) | pandas.read_csv |
import json
import os
import pickle as pkl
from collections import Counter, defaultdict, OrderedDict
from copy import deepcopy
from itertools import product
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
OrderedDict as OrderedDictType,
Union,
)
import numpy as np
import quaternion # noqa: F401
import torch
from pandas import DataFrame, read_csv
from tabulate import tabulate
from detectron2.data import MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from detectron2.structures import Instances
from roca.data import CategoryCatalog
from roca.data.constants import (
CAD_TAXONOMY,
CAD_TAXONOMY_REVERSE,
IMAGE_SIZE,
)
from roca.structures import Rotations
from roca.utils.alignment_errors import (
rotation_diff,
scale_ratio,
translation_diff,
)
from roca.utils.linalg import decompose_mat4, make_M_from_tqs
NMS_TRANS = 0.4
NMS_ROT = 60
NMS_SCALE = 0.6
TRANS_THRESH = 0.2
ROT_THRESH = 20
SCALE_THRESH = 0.2
VOXEL_IOU_THRESH = 0.5
class Vid2CADEvaluator(DatasetEvaluator):
def __init__(
self,
dataset_name: str,
full_annot: Union[str, List[Dict[str, Any]]],
cfg=None,
output_dir: str = '',
mocking: bool = False,
exclude: Optional[Iterable[str]]=None,
grid_file: Optional[str] = None,
exact_ret: bool = False,
key_prefix: str = '',
info_file: str = ''
):
self._dataset_name = dataset_name
self._metadata = MetadataCatalog.get(self._dataset_name)
self._category_manager = CategoryCatalog.get(self._dataset_name)
self.mocking = mocking
self._output_dir = output_dir
self._exclude = exclude
# Parse raw data
if isinstance(full_annot, list):
annots = full_annot
else:
with open(full_annot) as f:
annots = json.load(f)
self._full_annots = annots
scene_alignments = {}
scene_counts = defaultdict(lambda: Counter())
for annot in annots:
scene = annot['id_scan']
trs = annot['trs']
to_scene = np.linalg.inv(make_M_from_tqs(
trs['translation'], trs['rotation'], trs['scale']
))
alignment = []
for model in annot['aligned_models']:
if int(model['catid_cad']) not in CAD_TAXONOMY:
continue
scene_counts[scene][int(model['catid_cad'])] += 1
mtrs = model['trs']
to_s2c = make_M_from_tqs(
mtrs['translation'],
mtrs['rotation'],
mtrs['scale']
)
t, q, s = decompose_mat4(to_scene @ to_s2c)
alignment.append({
't': t.tolist(),
'q': q.tolist(),
's': s.tolist(),
'catid_cad': model['catid_cad'],
'id_cad': model['id_cad'],
'sym': model['sym']
})
scene_alignments[scene] = alignment
self._scene_alignments = scene_alignments
self._scene_counts = scene_counts
self.with_grids = grid_file is not None
self.grid_data = None
if self.with_grids:
with open(grid_file, 'rb') as f:
self.grid_data = pkl.load(f)
self.exact_ret = exact_ret
self.key_prefix = key_prefix
self.info_file = info_file
def reset(self):
self.results = defaultdict(list)
self.poses = defaultdict(list)
self.object_ids = defaultdict(list)
self.info_data = defaultdict(list)
def process(
self,
inputs: List[Dict[str, Any]],
outputs: List[Dict[str, Any]]
):
for input, output in zip(inputs, outputs):
file_name = input['file_name']
scene_name = input['file_name'].split('/')[-3]
if 'instances' not in output:
continue
instances = output['instances']
instances = instances[instances.scores > 0.5]
has_alignment = instances.has_alignment
if not has_alignment.any():
continue
instances = instances[has_alignment]
instances = deepcopy(instances) # avoid modification!
if instances.has('pred_meshes'):
instances.remove('pred_meshes')
self.results[scene_name].append(instances.to('cpu'))
if 'cad_ids' in output:
object_ids = output['cad_ids']
object_ids = [
object_ids[i]
for i in instances.pred_indices.tolist()
]
self.object_ids[scene_name].append(object_ids)
pose_file = file_name\
.replace('color', 'pose')\
.replace('.jpg', '.txt')
with open(pose_file) as f:
pose_mat = torch.tensor([
[float(v) for v in line.strip().split()]
for line in f
])
pose_mat = pose_mat.unsqueeze(0).expand(len(instances), 4, 4)
self.poses[scene_name].append(pose_mat)
'''if len(self.poses) == 20:
self.evaluate()
exit(0)'''
def process_mock(
self,
scene_name: str,
instances: Instances,
object_ids=None
):
self.results[scene_name] = instances
self.object_ids[scene_name] = object_ids
def evaluate(self) -> OrderedDictType[str, Dict[str, float]]:
self._collect_results()
self._transform_results_to_world_space()
path = self._write_raw_results()
return eval_csv(
self._dataset_name,
path,
self._full_annots,
exact_ret=self.exact_ret,
prefix=self.key_prefix,
info_file=self.info_file
)
def evaluate_mock(self) -> OrderedDictType[str, Dict[str, float]]:
self._nms_results()
self._apply_constraints()
return self._compute_metrics()
def _collect_results(self):
print('INFO: Collecting results...', flush=True)
for k, v in self.results.items():
instances = Instances.cat(v)
indices = instances.scores.argsort(descending=True)
self.results[k] = instances[indices]
self.poses[k] = torch.cat(self.poses[k], dim=0)[indices]
# NOTE: Objects corresponds to instances,
# so sort them similar to results
if k in self.object_ids:
object_ids = []
for ids in self.object_ids[k]:
object_ids.extend(ids)
self.object_ids[k] = [object_ids[i] for i in indices.tolist()]
def _transform_results_to_world_space(self):
print('INFO: Transforming results to world space...', flush=True)
for scene, instances in self.results.items():
poses = self.poses[scene]
# TODO: This can be batched
for i, (pose, t, q, s) in enumerate(zip(
poses.unbind(0),
instances.pred_translations.unbind(0),
instances.pred_rotations.unbind(0),
instances.pred_scales.unbind(0)
)):
pose = pose.numpy().reshape(4, 4)
mat = make_M_from_tqs(t.tolist(), q.tolist(), s.tolist())
new_t, new_q, new_s = decompose_mat4(pose @ mat)
instances.pred_translations[i] = torch.from_numpy(new_t)
instances.pred_rotations[i] = torch.from_numpy(new_q)
instances.pred_scales[i] = torch.from_numpy(new_s)
self.results[scene] = instances
self.poses[scene] = poses
def _write_raw_results(self):
output_dir = self._output_dir
output_path = os.path.join(output_dir, 'raw_results.csv')
print(
'INFO: Writing raw results to {}...'.format(output_path),
flush=True
)
data = defaultdict(lambda: [])
results = sorted(self.results.items(), key=lambda x: x[0])
for scene, instances in results:
data['id_scan'].extend((scene,) * len(instances))
for c in instances.pred_classes.tolist():
cid = CAD_TAXONOMY_REVERSE[self._category_manager.get_name(c)]
data['objectCategory'].append(cid)
data['alignedModelId'].extend(
id_cad for _, id_cad in self.object_ids[scene]
)
data['tx'].extend(instances.pred_translations[:, 0].tolist())
data['ty'].extend(instances.pred_translations[:, 1].tolist())
data['tz'].extend(instances.pred_translations[:, 2].tolist())
data['qw'].extend(instances.pred_rotations[:, 0].tolist())
data['qx'].extend(instances.pred_rotations[:, 1].tolist())
data['qy'].extend(instances.pred_rotations[:, 2].tolist())
data['qz'].extend(instances.pred_rotations[:, 3].tolist())
data['sx'].extend(instances.pred_scales[:, 0].tolist())
data['sy'].extend(instances.pred_scales[:, 1].tolist())
data['sz'].extend(instances.pred_scales[:, 2].tolist())
data['object_score'].extend(instances.scores.tolist())
frame = | DataFrame(data=data) | pandas.DataFrame |
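        # The rest of this method is outside the excerpt; since evaluate()
        # passes the returned value to eval_csv as a CSV path, the frame is
        # presumably written to output_path and that path returned, e.g.
        #     frame.to_csv(output_path, index=False)
        #     return output_path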
"""Classes for representing datasets of images and/or coordinates."""
import copy
import inspect
import json
import logging
import os.path as op
import numpy as np
import pandas as pd
from nilearn._utils import load_niimg
from .base import NiMAREBase
from .utils import (
_dict_to_coordinates,
_dict_to_df,
_listify,
_transform_coordinates_to_space,
_try_prepend,
_validate_df,
_validate_images_df,
get_masker,
get_template,
mm2vox,
)
LGR = logging.getLogger(__name__)
class Dataset(NiMAREBase):
"""Storage container for a coordinate- and/or image-based meta-analytic dataset/database.
.. versionchanged:: 0.0.9
* [ENH] Add merge method to Dataset class
.. versionchanged:: 0.0.8
* [FIX] Set ``nimare.dataset.Dataset.basepath`` in :func:`update_path` using absolute path.
Parameters
----------
source : :obj:`str` or :obj:`dict`
JSON file containing dictionary with database information or the dict()
object
target : :obj:`str`, optional
Desired coordinate space for coordinates. Names follow NIDM convention.
Default is 'mni152_2mm' (MNI space with 2x2x2 voxels).
This parameter has no impact on images.
mask : :obj:`str`, :class:`nibabel.nifti1.Nifti1Image`, \
:class:`nilearn.input_data.NiftiMasker` or similar, or None, optional
Mask(er) to use. If None, uses the target space image, with all
non-zero voxels included in the mask.
Attributes
----------
ids : 1D :class:`numpy.ndarray`
Identifiers
masker : :class:`nilearn.input_data.NiftiMasker` or similar
Masker object defining the space and location of the area of interest
(e.g., 'brain').
space : :obj:`str`
Standard space. Same as ``target`` parameter.
annotations : :class:`pandas.DataFrame`
Labels describing studies
coordinates : :class:`pandas.DataFrame`
Peak coordinates from studies
images : :class:`pandas.DataFrame`
Images from studies
metadata : :class:`pandas.DataFrame`
Metadata describing studies
texts : :class:`pandas.DataFrame`
Texts associated with studies
Notes
-----
Images loaded into a Dataset are assumed to be in the same space.
If images have different resolutions or affines from the Dataset's masker,
then they will be resampled automatically, at the point where they're used,
by :obj:`Dataset.masker`.
"""
_id_cols = ["id", "study_id", "contrast_id"]
def __init__(self, source, target="mni152_2mm", mask=None):
if isinstance(source, str):
with open(source, "r") as f_obj:
data = json.load(f_obj)
elif isinstance(source, dict):
data = source
else:
raise Exception("`source` needs to be a file path or a dictionary")
# Datasets are organized by study, then experiment
# To generate unique IDs, we combine study ID with experiment ID
# build list of ids
id_columns = ["id", "study_id", "contrast_id"]
all_ids = []
for pid in data.keys():
for expid in data[pid]["contrasts"].keys():
id_ = f"{pid}-{expid}"
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=id_columns, data=all_ids)
id_df = id_df.set_index("id", drop=False)
self._ids = id_df.index.values
# Set up Masker
if mask is None:
mask = get_template(target, mask="brain")
self.masker = mask
self.space = target
self.annotations = _dict_to_df(id_df, data, key="labels")
self.coordinates = _dict_to_coordinates(data, masker=self.masker, space=self.space)
self.images = _dict_to_df(id_df, data, key="images")
self.metadata = _dict_to_df(id_df, data, key="metadata")
self.texts = _dict_to_df(id_df, data, key="text")
self.basepath = None
def __repr__(self):
"""Show basic Dataset representation.
It's basically the same as the NiMAREBase representation, but with the number of
experiments in the Dataset represented as well.
"""
# Get default parameter values for the object
signature = inspect.signature(self.__init__)
defaults = {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
# Eliminate any sub-parameters (e.g., parameters for a MetaEstimator's KernelTransformer),
# as well as default values
params = self.get_params()
params = {k: v for k, v in params.items() if "__" not in k}
# Parameter "target" is stored as attribute "space"
# and we want to show it regardless of whether it's the default or not
params["space"] = self.space
params.pop("target")
params = {k: v for k, v in params.items() if defaults.get(k) != v}
# Convert to strings
param_strs = []
for k, v in params.items():
if isinstance(v, str):
# Wrap string values in single quotes
param_str = f"{k}='{v}'"
else:
# Keep everything else as-is based on its own repr
param_str = f"{k}={v}"
param_strs.append(param_str)
params_str = ", ".join(param_strs)
params_str = f"{len(self.ids)} experiments{', ' if params_str else ''}{params_str}"
rep = f"{self.__class__.__name__}({params_str})"
return rep
@property
def ids(self):
"""numpy.ndarray: 1D array of identifiers in Dataset.
The associated setter for this property is private, as ``Dataset.ids`` is immutable.
"""
return self.__ids
@ids.setter
def _ids(self, ids):
ids = np.sort(np.asarray(ids))
assert isinstance(ids, np.ndarray) and ids.ndim == 1
self.__ids = ids
@property
def masker(self):
""":class:`nilearn.input_data.NiftiMasker` or similar: Masker object.
Defines the space and location of the area of interest (e.g., 'brain').
"""
return self.__masker
@masker.setter
def masker(self, mask):
mask = get_masker(mask)
if hasattr(self, "masker") and not np.array_equal(
self.masker.mask_img.affine, mask.mask_img.affine
):
# This message does not have an associated effect,
# since matrix indices are calculated as necessary
LGR.warning("New masker does not match old masker. Space is assumed to be the same.")
self.__masker = mask
@property
def annotations(self):
""":class:`pandas.DataFrame`: Labels describing studies in the dataset.
Each study/experiment has its own row.
Columns correspond to individual labels (e.g., 'emotion'), and may
be prefixed with a feature group including two underscores
(e.g., 'Neurosynth_TFIDF__emotion').
"""
return self.__annotations
@annotations.setter
def annotations(self, df):
_validate_df(df)
self.__annotations = df.sort_values(by="id")
@property
def coordinates(self):
""":class:`pandas.DataFrame`: Coordinates in the dataset.
.. versionchanged:: 0.0.10
The coordinates attribute no longer includes the associated matrix indices
(columns 'i', 'j', and 'k'). These columns are calculated as needed.
Each study has one row for each peak.
Columns include ['x', 'y', 'z'] (peak locations in mm) and 'space' (Dataset's space).
"""
return self.__coordinates
@coordinates.setter
def coordinates(self, df):
_validate_df(df)
self.__coordinates = df.sort_values(by="id")
@property
def images(self):
""":class:`pandas.DataFrame`: Images in the dataset.
Each image type has its own column (e.g., 'z') with absolute paths to
files and each study has its own row.
Additionally, relative paths to image files are stored in columns with
the suffix '__relative' (e.g., 'z__relative').
Warnings
--------
Images are assumed to be in the same space, although they may have
different resolutions and affines. Images will be resampled as needed
at the point where they are used, via :obj:`Dataset.masker`.
"""
return self.__images
@images.setter
def images(self, df):
_validate_df(df)
self.__images = _validate_images_df(df).sort_values(by="id")
@property
def metadata(self):
""":class:`pandas.DataFrame`: Metadata describing studies in the dataset.
Each metadata field has its own column (e.g., 'sample_sizes') and each study
has its own row.
"""
return self.__metadata
@metadata.setter
def metadata(self, df):
_validate_df(df)
self.__metadata = df.sort_values(by="id")
@property
def texts(self):
""":class:`pandas.DataFrame`: Texts in the dataset.
Each text type has its own column (e.g., 'abstract') and each study
has its own row.
"""
return self.__texts
@texts.setter
def texts(self, df):
_validate_df(df)
self.__texts = df.sort_values(by="id")
def slice(self, ids):
"""Create a new dataset with only requested IDs.
Parameters
----------
ids : array_like
List of study IDs to include in new dataset
Returns
-------
new_dset : :obj:`~nimare.dataset.Dataset`
Reduced Dataset containing only requested studies.
"""
new_dset = copy.deepcopy(self)
new_dset._ids = ids
for attribute in ("annotations", "coordinates", "images", "metadata", "texts"):
df = getattr(new_dset, attribute)
df = df.loc[df["id"].isin(ids)]
setattr(new_dset, attribute, df)
return new_dset
def merge(self, right):
"""Merge two Datasets.
.. versionadded:: 0.0.9
Parameters
----------
right : :obj:`~nimare.dataset.Dataset`
Dataset to merge with.
Returns
-------
:obj:`~nimare.dataset.Dataset`
A Dataset of the two merged Datasets.
"""
assert isinstance(right, Dataset)
shared_ids = np.intersect1d(self.ids, right.ids)
if shared_ids.size:
raise Exception("Duplicate IDs detected in both datasets.")
all_ids = np.concatenate((self.ids, right.ids))
new_dset = copy.deepcopy(self)
new_dset._ids = all_ids
for attribute in ("annotations", "coordinates", "images", "metadata", "texts"):
df1 = getattr(self, attribute)
df2 = getattr(right, attribute)
new_df = df1.append(df2, ignore_index=True, sort=False)
new_df.sort_values(by="id", inplace=True)
new_df.reset_index(drop=True, inplace=True)
new_df = new_df.where(~new_df.isna(), None)
setattr(new_dset, attribute, new_df)
new_dset.coordinates = _transform_coordinates_to_space(
new_dset.coordinates,
self.masker,
self.space,
)
return new_dset
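    # Hedged usage sketch (file names are placeholders): two Datasets with
    # disjoint study IDs can be combined and then reduced again, e.g.
    #     merged = Dataset("dset1.json").merge(Dataset("dset2.json"))
    #     subset = merged.slice(merged.ids[:10])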
def update_path(self, new_path):
"""Update paths to images.
Prepends new path to the relative path for files in Dataset.images.
Parameters
----------
new_path : :obj:`str`
Path to prepend to relative paths of files in Dataset.images.
"""
self.basepath = op.abspath(new_path)
df = self.images
relative_path_cols = [c for c in df if c.endswith("__relative")]
for col in relative_path_cols:
abs_col = col.replace("__relative", "")
if abs_col in df.columns:
LGR.info(f"Overwriting images column {abs_col}")
df[abs_col] = df[col].apply(_try_prepend, prefix=self.basepath)
self.images = df
def copy(self):
"""Create a copy of the Dataset."""
return copy.deepcopy(self)
def get(self, dict_, drop_invalid=True):
"""Retrieve files and/or metadata from the current Dataset.
Parameters
----------
dict_ : :obj:`dict`
Dictionary specifying images or metadata to collect.
Keys should be variables to be used as keys for results dictionary.
Values should be tuples with two values:
type (e.g., 'image' or 'metadata') and specific field corresponding
to column of type-specific DataFrame (e.g., 'z' or 'sample_sizes').
drop_invalid : :obj:`bool`, optional
Whether to automatically ignore any studies without the required data or not.
            Default is True.
Returns
-------
results : :obj:`dict`
A dictionary of lists of requested data. Keys correspond to the keys in ``dict_``.
Examples
--------
>>> dset.get({'z_maps': ('image', 'z'), 'sample_sizes': ('metadata', 'sample_sizes')})
>>> dset.get({'coordinates': ('coordinates', None)})
"""
results = {}
results["id"] = self.ids
keep_idx = np.arange(len(self.ids), dtype=int)
for k, vals in dict_.items():
if vals[0] == "image":
temp = self.get_images(imtype=vals[1])
elif vals[0] == "metadata":
temp = self.get_metadata(field=vals[1])
elif vals[0] == "coordinates":
# Break DataFrame down into a list of study-specific DataFrames
temp = [self.coordinates.loc[self.coordinates["id"] == id_] for id_ in self.ids]
# Replace empty DataFrames with Nones
temp = [t if t.size else None for t in temp]
elif vals[0] == "annotations":
# Break DataFrame down into a list of study-specific DataFrames
temp = [self.annotations.loc[self.annotations["id"] == id_] for id_ in self.ids]
# Replace empty DataFrames with Nones
temp = [t if t.size else None for t in temp]
else:
raise ValueError(f"Input '{vals[0]}' not understood.")
results[k] = temp
temp_keep_idx = np.where([t is not None for t in temp])[0]
keep_idx = np.intersect1d(keep_idx, temp_keep_idx)
# reduce
if drop_invalid and (len(keep_idx) != len(self.ids)):
LGR.info(f"Retaining {len(keep_idx)}/{len(self.ids)} studies")
elif len(keep_idx) != len(self.ids):
raise Exception(
f"Only {len(keep_idx)}/{len(self.ids)} in Dataset contain the necessary data. "
"If you want to analyze the subset of studies with required data, "
"set `drop_invalid` to True."
)
for k in results:
results[k] = [results[k][i] for i in keep_idx]
if dict_.get(k, [None])[0] in ("coordinates", "annotations"):
results[k] = | pd.concat(results[k]) | pandas.concat |
"""Covid Model"""
__docformat__ = "numpy"
import warnings
import pandas as pd
import numpy as np
global_cases_time_series = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
"covid_19_time_series/time_series_covid19_confirmed_global.csv"
)
global_deaths_time_series = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_"
"covid_19_time_series/time_series_covid19_deaths_global.csv"
)
def get_global_cases(country: str) -> pd.DataFrame:
"""Get historical cases for given country
Parameters
----------
country: str
Country to search for
Returns
-------
pd.DataFrame
Dataframe of historical cases
"""
cases = pd.read_csv(global_cases_time_series)
cases = cases.rename(columns={"Country/Region": "Country"})
cases = (
cases.drop(columns=["Province/State", "Lat", "Long"])
.groupby("Country")
.agg("sum")
.T
)
cases.index = pd.to_datetime(cases.index)
cases = pd.DataFrame(cases[country]).diff().dropna()
if cases.shape[1] > 1:
return pd.DataFrame(cases.sum(axis=1))
return cases
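# Hedged usage sketch ("US" is an arbitrary example; the call fetches data from
# the JHU CSSE repository, so it needs network access):
#     us_cases = get_global_cases("US")
#     us_deaths = get_global_deaths("US")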
def get_global_deaths(country: str) -> pd.DataFrame:
"""Get historical deaths for given country
Parameters
----------
country: str
Country to search for
Returns
-------
pd.DataFrame
Dataframe of historical deaths
"""
deaths = pd.read_csv(global_deaths_time_series)
deaths = deaths.rename(columns={"Country/Region": "Country"})
deaths = (
deaths.drop(columns=["Province/State", "Lat", "Long"])
.groupby("Country")
.agg("sum")
.T
)
deaths.index = | pd.to_datetime(deaths.index) | pandas.to_datetime |
import pandas as pd
import os.path
import csv
import matplotlib.pyplot as plt
from modules.global_vars import *
class metabolite_pool(object):
def __init__(self, metabolite_name, number_of_carbons, pool_size):
self.metabolite_name = metabolite_name
self.number_of_carbons = number_of_carbons
self.pool_size = pool_size
# Initialize empty variables for later use
self.pool = None
self.tmp = None
self.collection_total_excess = pd.DataFrame()
self.collection_isotop_distr = pd.DataFrame()
def initialize_pool(self):
if verbose:
print('running %s.initialize_pool' % self.metabolite_name)
#Create dataframe according to number of carbons and pool size with unlabeled molecules
columns = {}
        for i in range(self.number_of_carbons): # create a dictionary with one key per carbon (C1 ... CX), each filled with unlabeled carbons (0)
columns['C' + str(i+1)] = self.pool_size*[0]
self.pool = pd.DataFrame(columns)
if verbose:
print(self.pool)
print('')
def calculate_enrichment(self):
if verbose:
print('running %s.calculate_enrichment' % self.metabolite_name)
# Total excess
number_13C = self.pool.values.sum()
number_totalC = self.pool.shape[0]*self.pool.shape[1]
self.total_excess = number_13C/number_totalC
# Calculate isotopologue distribution
# Sums up all rows individually, sum represents number of 13C in the molecule
self.row_sum = pd.DataFrame({'Number_of_13C': self.pool.sum(axis = 1)})
# Calculates for every M+# the enrichment and stores it in a dict
self.isotop_distr = {}
for i in range(self.number_of_carbons):
i = i+1
tmp = self.row_sum[self.row_sum.Number_of_13C == i] # Sum of row == i -> M+i
count = tmp.shape[0] # count rows where sum of rows == i -> number of molecules with M+i
enrichment = count/self.row_sum.shape[0] # relative to the total amount of rows
self.isotop_distr['M+' + str(i)] = enrichment
if verbose:
print('Total excess: ' + str(self.total_excess))
print('Isotop. Distr.: ' + str(self.isotop_distr))
print('')
# Collect dicts in dataframes
self.collection_total_excess = self.collection_total_excess.append({'total_excess': self.total_excess}, ignore_index = True)
self.collection_isotop_distr = self.collection_isotop_distr.append(self.isotop_distr, ignore_index = True)
def export(self):
if verbose:
print('running %s export_csv' % self.metabolite_name)
directory = os.path.join(os.getcwd(), 'output')
if not os.path.exists(directory):
os.makedirs(directory)
self.collection_total_excess.to_csv(os.path.join(directory, str(self.metabolite_name) + '_total_excess.csv'))
self.collection_isotop_distr.to_csv(os.path.join(directory, str(self.metabolite_name) + '_isotop_distr.csv'))
# Calculate relative isotopologue distribution
self.collection_relative_isotop_distr = pd.DataFrame()
for i in range(len(self.collection_isotop_distr.columns)):
self.collection_relative_isotop_distr['M+' + str(i+1)] = self.collection_isotop_distr.loc[:, 'M+' + str(i+1)]/self.collection_isotop_distr.sum(axis = 1)
# Export relative isotopologue distribution
self.collection_relative_isotop_distr.to_csv(os.path.join(directory, str(self.metabolite_name) + '_relative_isotop_distr.csv'))
if verbose:
#print(self.collection_total_excess)
#print(self.collection_isotop_distr)
print('exported to folder "csv"')
print('')
print('drawing total excess and relative isotopologue distribution')
print('')
# Draw figures
ax = self.collection_total_excess.plot(legend = None)
ax.set_xlabel('steps')
ax.set_ylabel('total excess')
ax.set_title('%s enrichment kinetic' % self.metabolite_name)
plt.savefig(os.path.join(directory, str(self.metabolite_name) + '_total_excess.png'))
#self.collection_isotop_distr.plot()
# relative isotop distr
fig, ax = plt.subplots()
x = range(self.collection_relative_isotop_distr.shape[0])
ax.stackplot(x, self.collection_relative_isotop_distr.T)
ax.margins(0, 0) # Set margins to avoid "whitespace"
ax.set_xlabel('steps')
ax.set_title('%s isotopologue distribution' % self.metabolite_name)
ax.legend(self.collection_relative_isotop_distr.columns, loc = 'center left')
#plt.show()
plt.savefig(os.path.join(directory, str(self.metabolite_name) + '_isotop_distr.png'))
plt.close()
# Print last total excess and isotop distr
print('%s:' % self.metabolite_name)
print(str(self.collection_total_excess.iloc[-1:]))
print(str(self.collection_isotop_distr.iloc[-1:]))
print('')
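    # Hedged usage sketch (pool names, sizes and transfer counts are
    # illustrative only):
    #     pyr = metabolite_pool('pyruvate', 3, 500)
    #     lac = metabolite_pool('lactate', 3, 500)
    #     pyr.initialize_pool(); lac.initialize_pool()
    #     pyr.introduce_molecules(100, '111')   # fully 13C-labelled pyruvate
    #     pyr.to_tmp(50); lac.from_tmp(50, source=pyr)
    #     lac.calculate_enrichment(); lac.export()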
def to_tmp(self, number_of_molecules):
if verbose:
print('running %s.to_tmp' % self.metabolite_name)
self.tmp = self.pool.sample(n = number_of_molecules)
self.pool = self.pool.loc[~self.pool.index.isin(self.tmp.index)] # rows which are not in self.tmp
if verbose:
print(self.tmp)
print('')
def from_tmp(self, number_of_molecules, source):
if verbose:
print('running %s.from_tmp' % self.metabolite_name)
tmp = source.tmp.sample(n = number_of_molecules)
source.tmp = source.tmp.loc[~source.tmp.index.isin(tmp.index)] # Consume molecules from source pool
self.pool = pd.concat([self.pool, tmp])
# Sanity check
#if self.pool.shape[0] != self.pool_size:
# print('ERROR in function "%s.from_tmp": Pool size changed' % self.metabolite_name)
# quit()
self.pool = self.pool.reset_index(drop=True)
if verbose:
print(self.pool)
print('')
def introduce_molecules(self, number_of_molecules, molecule):
if verbose:
print('running %s.introduce_molecules' % self.metabolite_name)
# Sanity check
if type(molecule) is not str:
print('ERROR: variable "molecule" in function "introduce_molecules" is not a string.')
quit()
        # Convert input string to a tuple of carbon labels (turned into a dict below)
molecule_tuple = tuple(molecule)
# Sanity check
if len(molecule_tuple) != self.number_of_carbons:
print('ERROR in function introduce_molecules: Molecule specified has the wrong number of carbons')
quit()
molecule_dict = {}
for i in range(len(molecule)):
molecule_dict['C' + str(i+1)] = int(molecule_tuple[i])
new_molecules = pd.DataFrame(number_of_molecules*[molecule_dict])
self.pool = pd.concat([self.pool, new_molecules])
self.pool = self.pool.reset_index(drop = True)
# Sanity check
#if self.pool.shape[0] != self.pool_size:
# print('ERROR in function "introduce_molecules": Pool size of %s changed' % self.metabolite_name)
# quit()
if verbose:
print(self.pool)
print('')
def check_pool_size(self):
if verbose:
print('running %s.check_pool_size' % self.metabolite_name)
if verbose:
if self.pool.shape[0] == self.pool_size:
print('+ Pool size unchanged')
print('')
else:
print('- DANGER: Pool size changed')
print('')
    def check_tmp_size(self, number_of_molecules = 0): # number_of_molecules == number expected to be left over
if verbose:
print('running %s.check_tmp_size' % self.metabolite_name)
if self.tmp.shape[0] == number_of_molecules:
print('+ %d molecules in %s.tmp left' % (number_of_molecules, self.metabolite_name))
print('')
else:
print('- DANGER: %d molecules instead of %d molecules in %s.tmp left' % (self.tmp.shape[0], number_of_molecules, self.metabolite_name))
print('')
def mirror_symmetry(self):
if verbose:
print('running %s.mirror_symmetry' % self.metabolite_name)
# Randomly choose 50% of the rows
to_be_rotated = self.pool.sample(frac=0.5)
# Selects all rows which are NOT in rotate and updates pool
self.pool = self.pool.loc[~self.pool.index.isin(to_be_rotated.index)]
have_been_rotated = to_be_rotated.rename(columns={a: b for a, b in zip(to_be_rotated.columns, reversed(to_be_rotated.columns))})
print(have_been_rotated)
# Concatenate dataframes
self.pool = | pd.concat([have_been_rotated, self.pool]) | pandas.concat |
""" This module provides the BaseScraper class """
# Standard library imports
from abc import ABCMeta, abstractmethod
from datetime import date
import logging
from pathlib import Path
import sys
from typing import Dict, Union, Optional
# Third party imports
import pandas as pd
from sqlalchemy.engine import Connection
# Local imports
from naccbis.Common import utils
from naccbis.Common.splits import Split, GameLogSplit
class BaseScraper(metaclass=ABCMeta):
"""This is the abstract base class for the scrapers.
It provides only the shared functionality between all scrapers.
Don't directly create an instance of the base class!
General procedure that each scraper follows:
1. Scrape the data from the web
2. Clean the data
3. Export the data
"""
BASE_URL = "https://naccsports.org/sports/bsb/"
TEAM_IDS = {
"Aurora": "AUR",
"Benedictine": "BEN",
"Concordia Chicago": "CUC",
"Concordia Wisconsin": "CUW",
"Dominican": "DOM",
"Edgewood": "EDG",
"Illinois Tech": "ILLT",
"Lakeland": "LAK",
"MSOE": "MSOE",
"Marian": "MAR",
"Maranatha": "MARN",
"Rockford": "ROCK",
"Wisconsin Lutheran": "WLC",
}
TABLES: Dict[str, str] = {}
VALID_OUTPUT = ["csv", "sql"]
def __init__(
self,
year: str,
split: Union[Split, GameLogSplit],
output: str,
inseason: bool = False,
verbose: bool = False,
conn: Optional[Connection] = None,
) -> None:
"""Class constructor
:param year: The school year. A string.
:param split: overall or conference stats. A string.
:param output: Output format. Currently csv and sql.
:param inseason: Is this scraping taking place in season?
:param verbose: Print extra information to standard out?
"""
self._name = "Base Scraper"
self._year = year
self._split = split
if output not in self.VALID_OUTPUT:
raise ValueError("Invalid output: {}".format(output))
self._output = output
self._inseason = inseason
self._verbose = verbose
self._data = | pd.DataFrame() | pandas.DataFrame |
# from feature_generation.utils import convert_categorical_labels_to_numerical
from feature_generation.Labels import Labels
import pandas as pd
from itertools import takewhile
import time
from feature_generation.datasets.Timeseries import Timeseries
class EMIP(Timeseries):
def __init__(self):
super().__init__("emip-fixations")
self.column_name_mapping = {
"id": self.column_names["subject_id"],
"fixationStart": self.column_names["time"],
"x": self.column_names["x"],
"y": self.column_names["y"],
"averagePupilSize": self.column_names["pupil_diameter"],
"duration": self.column_names["duration"],
"fixationEnd": self.column_names["fixation_end"],
}
self.label = "expertise_programming"
def prepare_files(self, file_references, metadata_references):
labels = pd.DataFrame()
dataset = []
with metadata_references[0].open("r") as f:
metadata_file = | pd.read_csv(f) | pandas.read_csv |
# Import Libraries
import time
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Import Libraries
from scipy import stats
import matplotlib.pyplot as plt
# import time
# Import Libraries
import math
class YinsDL:
print("---------------------------------------------------------------------")
print(
"""
Yin's Deep Learning Package
Copyright © W.Y.N. Associates, LLC, 2009 – Present
For more information, please go to https://wyn-associates.com/
""" )
print("---------------------------------------------------------------------")
# Define function
def NN3_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN3_Classifier(X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
            keras.layers.Dense(units=layer1size, activation=l1_act, input_shape=[X_train.shape[1]]),  # use the l1_act argument, which was otherwise ignored
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
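    # Illustrative note (not from the original package): the dictionary returned by
    # NN3_Classifier above can be consumed as follows; the key names are the ones
    # defined in the 'Output' block of the function, only the variable names here
    # are made up.
    #   result = YinsDL.NN3_Classifier(X_train, y_train, X_test, y_test, num_of_epochs=10, verbose=False)
    #   fitted_model = result['Model Fitting']            # trained keras model, reusable for .predict()
    #   print(result['Performance']['test_acc'])          # scalar test accuracy
    #   print(result['Performance']['confusion'])         # confusion matrix as a pandas DataFrame
    #   print(result['Results of ROC'])                   # dict for binary output, string otherwise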
# Define function
def NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN10_Classifier(
X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='relu', l4_act='relu', l5_act='relu',
l6_act='relu', l7_act='relu', l8_act='relu', l9_act='relu', l10_act='softmax',
layer1size=128, layer2size=64, layer3size=64, layer4size=64, layer5size=64,
layer6size=64, layer7size=64, layer8size=64, layer9size=64, layer10size=2,
plotROC=True,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Model
model = tf.keras.Sequential([
            keras.layers.Dense(units=layer1size, activation=l1_act, input_shape=[X_train.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act),
keras.layers.Dense(units=layer4size, activation=l4_act),
keras.layers.Dense(units=layer5size, activation=l5_act),
keras.layers.Dense(units=layer6size, activation=l6_act),
keras.layers.Dense(units=layer7size, activation=l7_act),
keras.layers.Dense(units=layer8size, activation=l8_act),
keras.layers.Dense(units=layer9size, activation=l9_act),
keras.layers.Dense(units=layer10size, activation=l10_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train, y_train, epochs=num_of_epochs)
# Prediction
predictions = model.predict(X_test)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer10size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
# Define function
def plotOneImage(
initialPosX = 1,
initialPosY = 0,
boxWidth = 1,
boxHeight = 0,
linewidth = 2,
edgecolor = 'r',
IMAGE = 0):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
im = np.array(IMAGE, dtype=np.uint8)
# Create figure and axes
fig,ax = plt.subplots(1)
# Display the image
ax.imshow(im)
# Create a Rectangle patch
rect = patches.Rectangle(
(initialPosX, initialPosY), boxWidth, boxHeight,
linewidth=linewidth, edgecolor=edgecolor, facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
# End of function
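    # Hedged usage sketch for plotOneImage (not part of the original code): any array
    # accepted by plt.imshow works as IMAGE; the synthetic image below is invented
    # purely for illustration.
    #   import numpy as np
    #   fake_img = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)
    #   YinsDL.plotOneImage(initialPosX=10, initialPosY=10, boxWidth=20, boxHeight=15,
    #                       linewidth=2, edgecolor='r', IMAGE=fake_img)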
# Define function
def ConvOperationC1(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
verbose=True, printManual=True):
if printManual:
print("----------------------------------------------------------------------")
print("Manual")
print(
"""
            This function takes X_train, y_train, X_test, y_test together with a selected input width and length
            as well as a filter. It then executes a convolutional operation to compute new
            features from combinations of the original variables and the filter.
            Note: the filter plays a crucial role, which is why it is exposed as a user-friendly
            argument and can be updated as the user sees fit.
# Run
newDataGenerated = YinsDL.ConvOperationC1(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
verbose=True, printManual=True)
""" )
print("----------------------------------------------------------------------")
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth, inputSHAPElenth))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth, inputSHAPElenth))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
for j in range(pd.DataFrame(X[incidence]).shape[1] - (filterWidth - 1)):
unitSample.append(
np.multiply(
                            pd.DataFrame(X[incidence]).iloc[i:(i + filterHeight), j:(j + filterWidth)],
unitFilter).sum(axis=1).sum())
return unitSample
# Apply Operation
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train, unitFilter=filter1)])
for i in range(1, X_train.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train, unitFilter=filter1)]) ])
# For Prediction
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test, unitFilter=filter1)])
for i in range(1, X_test.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test, unitFilter=filter1)]) ])
# Output
return {
'Data': [X_train, y_train, X_test, y_test, X_train_new, X_test_new],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)]
}
# End function
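    # Worked mini-example (illustrative, not part of the original API) of the sliding-window
    # arithmetic performed by YinsConvOp inside ConvOperationC1: with a single 3x3 sample
    # [[0,1,2],[3,4,5],[6,7,8]] and the default 2x2 filter [[1,0],[0,1]], each output entry is
    # the elementwise product of a 2x2 patch with the filter, summed:
    # top-left patch [[0,1],[3,4]] -> 0 + 4 = 4, then 6, 10, 12, so the flattened feature
    # vector for that sample is [4, 6, 10, 12].
    #   import numpy as np, pandas as pd
    #   patch_input = np.arange(9).reshape(1, 3, 3)
    #   unit_filter = pd.DataFrame([[1, 0], [0, 1]])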
# Define function
def C1NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth=10, inputSHAPElenth=3,
filter1 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True):
"""
MANUAL:
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = NN3_Classifier(X_train, y_train, X_test, y_test,
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
num_of_epochs=50)
"""
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth, inputSHAPElenth))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth, inputSHAPElenth))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
for j in range(pd.DataFrame(X[incidence]).shape[1] - (filterWidth - 1)):
unitSample.append(
np.multiply(
                            pd.DataFrame(X[incidence]).iloc[i:(i + filterHeight), j:(j + filterWidth)],
unitFilter).sum(axis=1).sum())
return unitSample
# Apply Operation
X_train_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_train, unitFilter=filter1)])
for i in range(1, X_train.shape[0]):
X_train_new = pd.concat([
X_train_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_train, unitFilter=filter1)]) ])
# Model
model = tf.keras.Sequential([
            keras.layers.Dense(units=layer1size, activation=l1_act, input_shape=[X_train_new.shape[1]]),
keras.layers.Dense(units=layer2size, activation=l2_act),
keras.layers.Dense(units=layer3size, activation=l3_act)
])
if verbose:
print("Summary of Network Architecture:")
model.summary()
# Compile
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
# Model Fitting
model.fit(X_train_new, y_train, epochs=num_of_epochs)
# Prediction
X_test_new = pd.DataFrame([YinsConvOp(incidence=0, X=X_test, unitFilter=filter1)])
for i in range(1, X_test.shape[0]):
X_test_new = pd.concat([
X_test_new,
pd.DataFrame([YinsConvOp(incidence=i, X=X_test, unitFilter=filter1)]) ])
predictions = model.predict(X_test_new)
# Performance
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
y_test_hat = np.argmax(predictions, axis=1)
confusion = confusion_matrix(y_test, y_test_hat)
confusion = pd.DataFrame(confusion)
test_acc = sum(np.diag(confusion)) / sum(sum(np.array(confusion)))
# Print
if verbose:
print("Confusion Matrix:")
print(confusion)
print("Test Accuracy:", round(test_acc, 4))
# ROCAUC
if layer3size == 2:
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3)
}
if verbose:
print(f'Test AUC: {areaUnderROC}')
if plotROC:
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
else:
resultsROC = "Response not in two classes."
# Output
return {
'Data': [X_train, y_train, X_test, y_test, X_train_new, X_test_new],
'Shape': [X_train.shape, len(y_train), X_test.shape, len(y_test)],
'Model Fitting': model,
'Performance': {
'response': {'response': y_test, 'estimated response': y_test_hat},
'test_acc': test_acc,
'confusion': confusion
},
'Results of ROC': resultsROC
}
# End of function
# Define function
def C2NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth1=10, inputSHAPElenth1=3,
inputSHAPEwidth2=8, inputSHAPElenth2=9,
filter1 = [[1,0], [0,1]],
filter2 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True,
printManual=False):
if printManual:
print("--------------------------------------------------------------------")
print("MANUAL:")
print(
"""
# One can use the following example.
house_sales = pd.read_csv('../data/kc_house_data.csv')
house_sales.head(3)
house_sales = house_sales.drop(['id', 'zipcode', 'lat', 'long', 'date'], axis=1)
house_sales.info()
X_all = house_sales.drop('price', axis=1)
y = np.log(house_sales.price)
y_binary = (y > y.mean()).astype(int)
y_binary
X_all.head(3), y_binary.head(3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_all, y_binary, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape)
print(y_train)
testresult = C2NN3_Classifier(
X_train, y_train, X_test, y_test,
inputSHAPEwidth1=10, inputSHAPElenth1=3,
inputSHAPEwidth2=8, inputSHAPElenth2=9,
filter1 = [[1,0], [0,1]],
filter2 = [[1,0], [0,1]],
l1_act='relu', l2_act='relu', l3_act='softmax',
layer1size=128, layer2size=64, layer3size=2,
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
num_of_epochs=10,
plotROC=True,
verbose=True,
            printManual=True)
""" )
print("--------------------------------------------------------------------")
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
if verbose:
print("Tensorflow Version:")
print(tf.__version__)
# Normalize
# Helper Function
def helpNormalize(X):
return (X - X.mean()) / np.std(X)
X_train = X_train.apply(helpNormalize, axis=1)
X_test = X_test.apply(helpNormalize, axis=1)
# Convolutional Operation
X_train = np.reshape(np.array(X_train), (X_train.shape[0], inputSHAPEwidth1, inputSHAPElenth1))
X_test = np.reshape(np.array(X_test), (X_test.shape[0], inputSHAPEwidth1, inputSHAPElenth1))
if verbose:
print('Shapes of X in training set', X_train.shape, 'Shapes of X in test set:', X_test.shape)
# Filter
filter1 = pd.DataFrame(filter1)
filter2 = pd.DataFrame(filter2)
# Convolutional Operation (called Yins to make it different from default function)
def YinsConvOp(incidence=0, X=X_train, unitFilter=filter1):
filterHeight = unitFilter.shape[0]
filterWidth = unitFilter.shape[1]
unitSample = []
            for i in range(pd.DataFrame(X[incidence]).shape[0] - (filterHeight - 1)):
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Soft sensing via XGBoost on UCI Wastewater Treatment Plant data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import pandas as pd
data_raw = pd.read_csv('water-treatment.data', header=None, na_values="?")
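#%% minimal continuation sketch (assumptions: the last column is treated as the soft-sensing
# target and the remaining numeric columns as inputs; XGBRegressor and its hyper-parameters
# below are placeholders illustrating the banner above, not taken from the original script)
# from xgboost import XGBRegressor
# from sklearn.model_selection import train_test_split
# X = data_raw.iloc[:, 1:-1].apply(pd.to_numeric, errors='coerce').fillna(method='ffill')
# y = pd.to_numeric(data_raw.iloc[:, -1], errors='coerce').fillna(method='ffill')
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# soft_sensor = XGBRegressor(n_estimators=200, max_depth=4).fit(X_train, y_train)
# print(soft_sensor.score(X_test, y_test))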
# coding: utf8
from .tsv_utils import complementary_list, add_demographics, baseline_df, chi2
from ..deep_learning.iotools import return_logger
from scipy.stats import ttest_ind
import shutil
import pandas as pd
from os import path
import numpy as np
import os
import logging
sex_dict = {'M': 0, 'F': 1}
def create_split(diagnosis, diagnosis_df, merged_df, n_test, age_name="age",
pval_threshold_ttest=0.80, t_val_chi2_threshold=0.0642,
logger=None):
"""
Split data at the subject-level in training and test set with equivalent age and sex distributions
:param diagnosis: (str) diagnosis on which the split is done
:param diagnosis_df: DataFrame with columns including ['participant_id', 'session_id', 'diagnosis']
:param merged_df: DataFrame with columns including ['age', 'sex'] and containing the same sessions as diagnosis_df
:param n_test: (float)
If >= 1 number of subjects to put in the test set.
If < 1 proportion of subjects to put in the test set.
:param age_name: (str) label of the age column in the dataset.
:param pval_threshold_ttest: (float) threshold for the t-test on age
:param t_val_chi2_threshold: (float) threshold for the chi2 test on sex
:param logger: Logger object from logging library
:return:
train_df (DataFrame) subjects in the train set
test_df (DataFrame) subjects in the test set
"""
if logger is None:
logger = logging
logger.basicConfig(level=logging.DEBUG)
diagnosis_baseline_df = baseline_df(diagnosis_df, diagnosis)
baseline_demographics_df = add_demographics(diagnosis_baseline_df, merged_df, diagnosis)
if n_test >= 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(diagnosis_baseline_df))
sex = list(baseline_demographics_df.sex.values)
age = list(baseline_demographics_df[age_name].values)
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
        # Find a similar distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
        # Find a similar distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
if T_sex < t_val_chi2_threshold and p_age > pval_threshold_ttest:
flag_selection = False
test_df = baseline_demographics_df.loc[idx_test]
train_df = baseline_demographics_df.loc[idx_train]
n_try += 1
logger.info("Split for diagnosis %s was found after %i trials" % (diagnosis, n_try))
return train_df, test_df
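# Hedged usage sketch for create_split (not part of the original module). The file names
# below are hypothetical; diagnosis_df is expected to carry at least
# ['participant_id', 'session_id', 'diagnosis'] and merged_df the matching sessions with
# 'age' and 'sex' columns, as stated in the docstring above.
#   import pandas as pd
#   diagnosis_df = pd.read_csv('labels/AD.tsv', sep='\t')
#   merged_df = pd.read_csv('merged.tsv', sep='\t').set_index(['participant_id', 'session_id'])
#   train_df, test_df = create_split('AD', diagnosis_df, merged_df, n_test=0.2)
#   print(len(train_df), len(test_df))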
def split_diagnoses(merged_tsv, formatted_data_path,
n_test=100, age_name="age", subset_name="test", MCI_sub_categories=True,
t_val_threshold=0.0642, p_val_threshold=0.80, verbosity=0):
"""
Performs a single split for each label independently on the subject level.
The train folder will contain two lists per diagnosis (baseline and longitudinal),
whereas the test folder will only include the list of baseline sessions.
The age and sex distributions between the two sets must be non-significant (according to T-test and chi-square).
Args:
merged_tsv (str): Path to the file obtained by the command clinica iotools merge-tsv.
formatted_data_path (str): Path to the folder containing data extracted by clinicadl tsvtool getlabels.
n_test (float):
If > 1, number of subjects to put in set with name 'subset_name'.
If < 1, proportion of subjects to put in set with name 'subset_name'.
If 0, no training set is created and the whole dataset is considered as one set with name 'subset_name'.
age_name (str): Label of the age column in the dataset.
subset_name (str): Name of the subset that is complementary to train.
MCI_sub_categories (bool): If True, manages MCI sub-categories to avoid data leakage.
t_val_threshold (float): The threshold used for the chi2 test on sex distributions.
p_val_threshold (float): The threshold used for the T-test on age distributions.
verbosity (int): level of verbosity.
Returns:
writes three files per <label>.tsv file present in formatted_data_path:
- formatted_data_path/train/<label>.tsv
- formatted_data_path/train/<label>_baseline.tsv
- formatted_data_path/<subset_name>/<label>_baseline.tsv
"""
logger = return_logger(verbosity, "split")
# Read files
merged_df = pd.read_csv(merged_tsv, sep='\t')
merged_df.set_index(['participant_id', 'session_id'], inplace=True)
results_path = formatted_data_path
train_path = path.join(results_path, 'train')
if path.exists(train_path):
shutil.rmtree(train_path)
if n_test > 0:
os.makedirs(train_path)
test_path = path.join(results_path, subset_name)
if path.exists(test_path):
shutil.rmtree(test_path)
os.makedirs(test_path)
diagnosis_df_paths = os.listdir(results_path)
diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]
diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]
MCI_special_treatment = False
if 'MCI.tsv' in diagnosis_df_paths and n_test > 0:
if MCI_sub_categories:
diagnosis_df_paths.remove('MCI.tsv')
MCI_special_treatment = True
elif 'sMCI.tsv' in diagnosis_df_paths or 'pMCI.tsv' in diagnosis_df_paths:
logger.warning("MCI special treatment was deactivated though MCI subgroups were found."
"Be aware that it may cause data leakage in transfer learning tasks.")
    # The baseline sessions must be extracted first, otherwise all the sessions would be mixed together
for diagnosis_df_path in diagnosis_df_paths:
diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path),
sep='\t')
diagnosis = diagnosis_df_path.split('.')[0]
if n_test > 0:
train_df, test_df = create_split(diagnosis, diagnosis_df, merged_df, age_name=age_name,
n_test=n_test, t_val_chi2_threshold=t_val_threshold,
pval_threshold_ttest=p_val_threshold, logger=logger)
# Save baseline splits
train_df = train_df[['participant_id', 'session_id', 'diagnosis']]
train_df.to_csv(path.join(train_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
test_df = test_df[['participant_id', 'session_id', 'diagnosis']]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, str(diagnosis) + '.tsv'), sep='\t', index=False)
else:
diagnosis_baseline_df = baseline_df(diagnosis_df, diagnosis)
test_df = diagnosis_baseline_df[['participant_id', 'session_id', 'diagnosis']]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
if MCI_special_treatment:
# Extraction of MCI subjects without intersection with the sMCI / pMCI train
diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\t')
MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])
supplementary_diagnoses = []
logger.debug('Before subjects removal for MCI special treatment')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))
if 'sMCI.tsv' in diagnosis_df_paths:
sMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_df = pd.concat([sMCI_baseline_train_df, sMCI_baseline_test_df])
sMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in sMCI_baseline_df.index.values:
subject = sMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('sMCI')
logger.debug('Removed %i subjects based on sMCI label' % len(sMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if 'pMCI.tsv' in diagnosis_df_paths:
pMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_df = pd.concat([pMCI_baseline_train_df, pMCI_baseline_test_df])
pMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in pMCI_baseline_df.index.values:
subject = pMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('pMCI')
logger.debug('Removed %i subjects based on pMCI label' % len(pMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if len(supplementary_diagnoses) == 0:
            raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with '
                             'MCI subcategories.')
# Construction of supplementary train
supplementary_train_df = pd.DataFrame()
for diagnosis in supplementary_diagnoses:
sup_baseline_train_df = pd.read_csv(path.join(train_path, diagnosis + '_baseline.tsv'), sep='\t')
supplementary_train_df = pd.concat([supplementary_train_df, sup_baseline_train_df])
sub_df = supplementary_train_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('supplementary_train_df %i subjects, %i scans' % (len(sub_df), len(supplementary_train_df)))
supplementary_train_df.reset_index(drop=True, inplace=True)
supplementary_train_df = add_demographics(supplementary_train_df, merged_df, 'MCI')
# MCI selection
MCI_df.reset_index(inplace=True)
diagnosis_baseline_df = baseline_df(MCI_df, 'MCI')
baseline_demographics_df = add_demographics(diagnosis_baseline_df, merged_df, 'MCI')
complete_diagnosis_baseline_df = baseline_df(diagnosis_df, 'MCI')
if n_test > 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(complete_diagnosis_baseline_df))
sex = list(baseline_demographics_df.sex.values)
age = list(baseline_demographics_df[age_name].values)
sup_train_sex = list(supplementary_train_df.sex.values)
sup_train_age = list(supplementary_train_df[age_name].values)
sup_train_sex = [sex_dict[x] for x in sup_train_sex]
sup_train_age = [float(x) for x in sup_train_age]
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
            # Compare the distributions of the demographic variables (age, MMSE, sex)
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train] + sup_train_age
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train] + sup_train_sex
t_age, p_age = ttest_ind(age_test, age_train)
T_sex = chi2(sex_test, sex_train)
if T_sex < t_val_threshold and p_age > p_val_threshold:
flag_selection = False
MCI_baseline_test_df = baseline_demographics_df.loc[idx_test]
train_df = baseline_demographics_df.loc[idx_train]
MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])
                logger.debug('Supplementary train df %i' % len(supplementary_train_df))
MCI_baseline_train_df.reset_index(drop=True, inplace=True)
n_try += 1
logger.info('Split for diagnosis MCI was found after %i trials' % n_try)
# Write selection of MCI
MCI_baseline_train_df = MCI_baseline_train_df[['participant_id', 'session_id', 'diagnosis']]
MCI_baseline_train_df.to_csv(path.join(train_path, 'MCI_baseline.tsv'), sep='\t', index=False)
MCI_baseline_test_df = MCI_baseline_test_df[['participant_id', 'session_id', 'diagnosis']]
MCI_baseline_test_df.to_csv(path.join(test_path, 'MCI_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
        MCI_complete_train_df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import numpy as np
import datetime
import math
import os
import osr
import glob
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
import gdal
from joblib import Parallel, delayed
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = 0.001,
epsg=4326, cores=1):
'''
This function runs the python implementation of the HANTS algorithm. It
takes a folder with geotiffs raster data as an input, creates a netcdf
file, and optionally export the data back to geotiffs.
'''
nc_paths = create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg)
args = [nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor]
print('\tApply HANTS on tiles...')
results = Parallel(n_jobs=cores)(delayed(HANTS_netcdf)(nc_path, args)
for nc_path in nc_paths)
if len(nc_paths) > 1:
Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor)
return nc_path
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg=4326):
'''
This function creates a netcdf file from a folder with geotiffs rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
# ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.toordinal() for d in dates_dt]
os.chdir(rasters_path)
ras_ls = glob.glob('*.tif')
# Create tile parts
if (lat_n > 200 or lon_n > 200):
lat_n_amount = np.maximum(1,int(np.floor(lat_n/100)))
lon_n_amount = np.maximum(1,int(np.floor(lon_n/100)))
nc_path_part_names = nc_path.split('.')
nc_path_tiles = []
for lat_n_one in range(0, lat_n_amount):
for lon_n_one in range(0, lon_n_amount):
nc_path_tile = ''.join(nc_path_part_names[0] + "_h%03d_v%03d.nc" %(lon_n_one, lat_n_one))
nc_path_tiles = np.append(nc_path_tiles, nc_path_tile)
else:
nc_path_tiles = nc_path
i = 0
# Loop over the nc_paths
for nc_path_tile in nc_path_tiles:
i += 1
if lat_n_amount > 1:
lat_part = int(nc_path_tile[-6:-3])
lat_start = lat_part * 100
            if int(lat_part) != int(lat_n_amount-1):
lat_end = int((lat_part + 1) * 100)
else:
lat_end = int(lat_n)
else:
lat_start = int(0)
lat_end = int(lat_n)
if lon_n_amount > 1:
lon_part = int(nc_path_tile[-11:-8])
lon_start = int(lon_part * 100)
            if int(lon_part) != int(lon_n_amount-1):
lon_end = int((lon_part + 1) * 100)
else:
lon_end = int(lon_n)
else:
lon_start = int(0)
lon_end = int(lon_n)
# Define space dimention
lat_range = lat_ls[lat_start:lat_end]
lon_range = lon_ls[lon_start:lon_end]
geo_ex = tuple([lon_range[0] - 0.5*cellsize, cellsize, 0, lat_range[0] + cellsize * 0.5, 0, -cellsize])
# Create netcdf file
print('Creating netCDF file tile %s out of %s...' %(i,len(nc_path_tiles)))
nc_file = netCDF4.Dataset(nc_path_tile, 'w', format="NETCDF4_CLASSIC")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_end - lat_start)
lon_dim = nc_file.createDimension('longitude', lon_end - lon_start)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mapping_name = 'latitude_longitude'
crso.projection = spa_ref
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_ex
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time',))
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
original_var = nc_file.createVariable('original_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
original_var.long_name = 'original_values'
original_var.grid_mapping = 'crs'
original_var.add_offset = 0.00
original_var.scale_factor = Scaling_factor
original_var.set_auto_maskandscale(False)
print('\tVariables created')
# Fill in time and space dimention
lat_var[:] = lat_range
lon_var[:] = lon_range
time_var[:] = dates_ls
# Create memory example file
# empty array
empty_vec = pd.np.empty((lat_end - lat_start, lon_end - lon_start))
empty_vec[:] = -9999 * np.float(Scaling_factor)
dest_ex = Save_as_MEM(empty_vec, geo_ex, str(epsg))
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
Date_now = datetime.datetime.fromordinal(dates_ls[tt])
yyyy = str(Date_now.year)
mm = '%02d' %int(Date_now.month)
dd = '%02d' %int(Date_now.day)
# Raster
ras = name_format.format(yyyy=yyyy,mm=mm,dd=dd)
if ras in ras_ls:
data_in = os.path.join(rasters_path, ras)
dest = reproject_dataset_example(data_in, dest_ex)
array_tt = dest.GetRasterBand(1).ReadAsArray()
array_tt[array_tt<-9999] = -9999 * np.float(Scaling_factor)
original_var[tt, :, :] = np.int_(array_tt * 1./np.float(Scaling_factor))
else:
# Store values
original_var[tt, :, :] = np.int_(empty_vec * 1./np.float(Scaling_factor))
# Close file
nc_file.close()
print('NetCDF %s file created' %i)
# Return
return nc_path_tiles
def HANTS_netcdf(nc_path, args):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = args
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+', format="NETCDF4_CLASSIC")
nc_file.set_fill_on()
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[ztime, rows, cols] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((ztime, rows, cols))
outliers_hants = pd.np.empty((ztime, rows, cols))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
#print('Running HANTS...')
for m in range(rows):
for n in range(cols):
#print('\t{0}/{1}'.format(counter, size_st))
y = pd.np.array(original_values[:, m, n])
y[pd.np.isnan(y)] = -9999
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta)
values_hants[:, m, n] = yr
outliers_hants[:, m, n] = outliers
counter = counter + 1
values_hants[values_hants<-9999] = -9999 * np.float(Scaling_factor)
hants_var = nc_file.createVariable('hants_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
hants_var.long_name = 'hants_values'
hants_var.grid_mapping = 'crs'
hants_var.add_offset = 0.00
hants_var.scale_factor = Scaling_factor
hants_var.set_auto_maskandscale(False)
combined_var = nc_file.createVariable('combined_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
combined_var.long_name = 'combined_values'
combined_var.grid_mapping = 'crs'
combined_var.add_offset = 0.00
combined_var.scale_factor = Scaling_factor
combined_var.set_auto_maskandscale(False)
outliers_var = nc_file.createVariable('outliers', 'i4',
('time', 'latitude', 'longitude'),
fill_value=-9999)
outliers_var.long_name = 'outliers'
outliers_var.grid_mapping = 'crs'
hants_var[:,:,:]= np.int_(values_hants * 1./np.float(Scaling_factor))
outliers_var[:,:,:] = outliers_hants
combined_var[:,:,:] = pd.np.where(outliers_hants,
np.int_(values_hants * 1./np.float(Scaling_factor)),
np.int_(original_values * 1./np.float(Scaling_factor)))
# Close netcdf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time series.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', format="NETCDF4_CLASSIC")
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
lat_i = pd.np.where(lat == lat_closest)[0][0]
lon_i = pd.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][:, lat_i, lon_i]
# Additional parameters
ni = len(time)
ts = range(ni)
# HANTS
y = pd.np.array(original_values)
y[pd.np.isnan(y)] = -9999
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta)
# Plot
top = 1.15*max(pd.np.nanmax(original_values),
pd.np.nanmax(hants_values))
bottom = 1.15*min(pd.np.nanmin(original_values),
pd.np.nanmin(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netcdf file
nc_file.close()
# Data frame
df = pd.DataFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta):
'''
This function applies the Harmonic ANalysis of Time Series (HANTS)
algorithm originally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-
'''
# Arrays
mat = pd.np.zeros((min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = pd.np.zeros((ni, 1))
outliers = pd.np.zeros((1, len(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = min(2*nf+1, ni)
noutmax = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*pd.np.arange(nb)/nb
cs = pd.np.cos(ang)
sn = pd.np.sin(ang)
i = pd.np.arange(1, nf+1)
for j in pd.np.arange(ni):
index = pd.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = pd.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = pd.np.sum(p == 0)
if nout > noutmax:
if pd.np.isclose(y, -9999).any():
ready = pd.np.array([True])
yr = y
outliers = pd.np.zeros((y.shape[0]), dtype=int)
outliers[:] = -9999
else:
raise Exception('Not enough data points.')
else:
ready = pd.np.zeros((y.shape[0]), dtype=bool)
nloop = 0
nloopmax = ni
while ((not ready.all()) & (nloop < nloopmax)):
nloop += 1
za = pd.np.matmul(mat, p*y)
A = pd.np.matmul(pd.np.matmul(mat, pd.np.diag(p)),
pd.np.transpose(mat))
A = A + pd.np.identity(nr)*delta
A[0, 0] = A[0, 0] - delta
zr = pd.np.linalg.solve(A, za)
yr = pd.np.matmul(pd.np.transpose(mat), zr)
diffVec = sHiLo*(yr-y)
err = p*diffVec
err_ls = list(err)
err_sort = deepcopy(err)
err_sort.sort()
rankVec = [err_ls.index(f) for f in err_sort]
maxerr = diffVec[rankVec[-1]]
ready = (maxerr <= fet) | (nout == noutmax)
if (not ready):
i = ni - 1
j = rankVec[i]
while ((p[j]*diffVec[j] > 0.5*maxerr) & (nout < noutmax)):
p[j] = 0
outliers[0, j] = 1
nout += 1
i -= 1
if i == 0:
j = 0
else:
                        j = rankVec[i]
return [yr, outliers]
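# Illustrative, self-contained sketch (not part of the original module) showing how the
# HANTS routine above can be exercised on a synthetic series; the parameter values are
# plausible choices for a 36-sample annual series, not values taken from the source.
def _example_hants_fit():
    ni = 36                                   # e.g. 36 ten-day composites in one year
    ts = range(ni)
    y = 0.5 + 0.3 * np.sin(2 * math.pi * np.arange(ni) / 36.0)
    y[5] = -0.4                               # inject a single low outlier
    yr, outliers = HANTS(ni=ni, nb=36, nf=2, y=y, ts=ts, HiLo='Lo',
                         low=-1.0, high=2.0, fet=0.05, dod=1, delta=0.1)
    return yr, outliers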
def plot_point(nc_path, point, ylim=None):
'''
This function plots the original time series and the HANTS time series.
It can be used to assess the fit.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', format="NETCDF4_CLASSIC")
time = [pd.to_datetime(i, format='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point falls within the extent of the netcdf file
lon_max = max(lon)
lon_min = min(lon)
lat_max = max(lat)
lat_min = min(lat)
if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcdf file. '
'The closest cell is plotted.')
if lonx > lon_max:
lonx = lon_max
elif lonx < lon_min:
lonx = lon_min
if latx > lat_max:
latx = lat_max
elif latx < lat_min:
latx = lat_min
# Get lat-lon index in the netcdf file
    lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
from __future__ import print_function, division
from warnings import warn, filterwarnings
from matplotlib import rcParams
import matplotlib.pyplot as plt
from collections import OrderedDict
import random
import sys
import pandas as pd
import numpy as np
import h5py
import os
import pickle
from keras.models import Sequential
from keras.layers import Dense, Conv1D, GRU, Bidirectional, Dropout
from keras.utils import plot_model
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint
import keras.backend as K
from nilmtk.utils import find_nearest
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
import random
import json
from .util import *
random.seed(10)
np.random.seed(10)
class WindowGRU(Disaggregator):
def __init__(self, params):
self.MODEL_NAME = "WindowGRU"
self.save_model_path = params.get('save-model-path',None)
self.load_model_path = params.get('pretrained-model-path',None)
self.chunk_wise_training = params.get('chunk_wise_training',False)
self.sequence_length = params.get('sequence_length',99)
self.n_epochs = params.get('n_epochs', 10)
self.models = OrderedDict()
self.max_val = 800
self.batch_size = params.get('batch_size',512)
def partial_fit(self,train_main,train_appliances,do_preprocessing=True,**load_kwargs):
if do_preprocessing:
train_main, train_appliances = self.call_preprocessing(train_main, train_appliances, 'train')
train_main = pd.concat(train_main,axis=0).values
train_main = train_main.reshape((-1,self.sequence_length,1))
new_train_appliances = []
for app_name, app_df in train_appliances:
app_df = pd.concat(app_df,axis=0).values
app_df = app_df.reshape((-1,1))
new_train_appliances.append((app_name, app_df))
train_appliances = new_train_appliances
for app_name, app_df in train_appliances:
if app_name not in self.models:
print("First model training for ", app_name)
self.models[app_name] = self.return_network()
else:
print("Started re-training model for ", app_name)
model = self.models[app_name]
mains = train_main.reshape((-1,self.sequence_length,1))
app_reading = app_df.reshape((-1,1))
filepath = 'windowgru-temp-weights-'+str(random.randint(0,100000))+'.h5'
checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
train_x, v_x, train_y, v_y = train_test_split(mains, app_reading, test_size=.15,random_state=10)
model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],shuffle=True,batch_size=self.batch_size)
model.load_weights(filepath)
def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
if model is not None:
self.models = model
if do_preprocessing:
test_main_list = self.call_preprocessing(
test_main_list, submeters_lst=None, method='test')
test_predictions = []
for mains in test_main_list:
disggregation_dict = {}
mains = mains.values.reshape((-1,self.sequence_length,1))
for appliance in self.models:
prediction = self.models[appliance].predict(mains,batch_size=self.batch_size)
prediction = np.reshape(prediction, len(prediction))
valid_predictions = prediction.flatten()
valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
valid_predictions = self._denormalize(valid_predictions, self.max_val)
df = pd.Series(valid_predictions)
disggregation_dict[appliance] = df
results = pd.DataFrame(disggregation_dict, dtype='float32')
test_predictions.append(results)
return test_predictions
def call_preprocessing(self, mains_lst, submeters_lst, method):
max_val = self.max_val
if method == 'train':
print("Training processing")
processed_mains = []
for mains in mains_lst:
# add padding values
padding = [0 for i in range(0, self.sequence_length - 1)]
                paddf = pd.DataFrame({mains.columns.values[0]: padding})
from pathlib import Path
import numpy as np
import pandas as pd
import torch
import cv2
import os
from PIL import Image
from .base_dataset import BaseDataset
from .constants import COL_PATH, COL_STUDY
class SUDataset(BaseDataset):
def __init__(self, data_dir,
transform_args, split, is_training,
tasks_to, study_level,
frontal_lateral=False, frac=1,
subset=None, toy=False,
return_info_dict=False):
""" SU Dataset
Args:
data_dir (string): Name of the root data directory.
transform_args (Namespace): Args for data transforms
split (string): Name of the CSV to load.
is_training (bool): True if training, False otherwise.
tasks_to (string): Name of the sequence of tasks.
study_level (bool): If true, each example is a study rather than an individual image.
subset: String that specified as subset that should be loaded: AP, PA or Lateral.
return_info_dict: If true, return a dict of info with each image.
Notes:
When study_level is true, the study folder is set as the index of the
DataFrame. To retrieve images from a study, .loc[study_folder] is used.
"""
dataset_task_sequence = 'stanford'
super().__init__(data_dir, transform_args, split, is_training, 'stanford', tasks_to, dataset_task_sequence)
self.subset = subset
self.study_level = study_level
self.return_info_dict = return_info_dict
df = self._load_df(self.data_dir, split, subset, self.original_tasks)
self.studies = df[COL_STUDY].drop_duplicates()
if toy and split == 'train':
self.studies = self.studies.sample(n=10)
df = df[df[COL_STUDY].isin(self.studies)]
df = df.reset_index(drop=True)
# Sample a fraction of the data for training.
if frac != 1 and is_training:
self.studies = self.studies.sample(frac=frac)
df = df[df[COL_STUDY].isin(self.studies)]
df = df.reset_index(drop=True)
# Set Study folder as index.
if study_level:
self._set_study_as_index(df)
# Get labels and image paths.
self.frontal_lateral = frontal_lateral
self.labels = self._get_labels(df)
self.img_paths = self._get_paths(df)
# Set class weights.
self._set_class_weights(self.labels)
@staticmethod
def _load_df(data_dir, split, subset, original_tasks):
csv_name = f"{split}.csv" if not split.endswith(".csv") else split
chexpert_data_dir = "CheXpert-v1.0"
codalab_data_dir = "CodaLab"
uncertainty_data_dir = "Uncertainty"
if 'test' in split:
csv_path = data_dir / codalab_data_dir / f"{split}_image_paths.csv"
specific_data_dir = codalab_data_dir
elif 'uncertainty' in split:
csv_path = data_dir / uncertainty_data_dir / csv_name
specific_data_dir = chexpert_data_dir
else:
csv_path = data_dir / chexpert_data_dir / csv_name
specific_data_dir = chexpert_data_dir
df = pd.read_csv(csv_path)
df[COL_PATH] = df[COL_PATH].apply(lambda x: data_dir / x.replace(str(chexpert_data_dir), str(specific_data_dir)))
df[COL_STUDY] = df[COL_PATH].apply(lambda p: str(p.parent))
if 'test' in split:
csv_name = "test_groundtruth.csv"
            gt_df = pd.read_csv(data_dir / codalab_data_dir / csv_name)
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 14:37:03 2019
@author: ppradeep
"""
import os
clear = lambda: os.system('cls')
clear()
## Import packages
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import pickle
# Classifiers
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm, preprocessing
path = 'C:/Users/Administrator/OneDrive/Profile/Desktop/HTTK/'
#path = 'Z:/Projects/HTTK/'
#%%
# Normalize descriptors: Transform variables to mean=0, variance=1
def normalizeDescriptors(X):
scaler = preprocessing.StandardScaler().fit(X)
transformed = scaler.transform(X)
x_norm = pd.DataFrame(transformed, index = X.index)
x_norm.columns = X.columns
return(scaler, x_norm)
#%%
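# Hedged illustration (not in the original script) of normalizeDescriptors on a toy
# frame; the numbers are invented, the real script applies it to the descriptor
# tables loaded below.
#   demo = pd.DataFrame({'d1': [1.0, 2.0, 3.0], 'd2': [10.0, 20.0, 30.0]})
#   demo_scaler, demo_norm = normalizeDescriptors(demo)
#   # demo_norm now has column-wise mean ~0 and unit variance; demo_scaler can be
#   # reused later via demo_scaler.transform(...) to project new chemicals onto the same scale.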
###########################################################################
###########################################################################
## Build the final models
###########################################################################
###########################################################################
####-----------------------------------------------------------------------------------------------------------------
## Read training data
####-----------------------------------------------------------------------------------------------------------------
data1 = pd.read_csv(path+'data/Prachi-112117.txt', index_col = 'CAS').loc[:,['All.Compound.Names', 'Human.Funbound.plasma', 'Human.Clint']]
data1.rename(columns={'All.Compound.Names' : 'Name'}, inplace = True)
data2 = pd.read_excel(path+'data/AFFINITY_Model_Results-2018-02-27.xlsx', index_col = 'CAS').loc[:,['Name','Fup.Med']]
data2.rename(columns={'Name': 'All.Compound.Names','Fup.Med':'Human.Funbound.plasma'}, inplace = True)
data3 = pd.read_excel(path+'data/CLint-2018-03-01-Results.xlsx', index_col = 'CAS').loc[:,['Name','CLint.1uM.Median']]
data3.rename(columns={'Name': 'All.Compound.Names','CLint.1uM.Median':'Human.Clint'}, inplace = True)
#%%
####-----------------------------------------------------------------------------------------------------------------
## Read training fingerprints
####-----------------------------------------------------------------------------------------------------------------
## Chemotyper FPs: 779 Toxprints
df_chemotypes = pd.read_csv(path+'data/toxprint.txt', sep = ';', index_col='M_NAME') #Rename 'M_NAME' to 'CAS' in data file
## PubChem FPs: 881 bits
df_pubchem = pd.read_csv(path+'data/pubchem.txt', index_col='row ID')
####-----------------------------------------------------------------------------------------------------------------
## Read continuous descriptors
####-----------------------------------------------------------------------------------------------------------------
### OPERA descriptors
df_opera = pd.read_csv(path+'data/OPERA2.5_Pred.csv', index_col='MoleculeID')
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
        result = concat(frames)
import numpy as np
from .helpers import scale, scale_clean
#from numba import jit
import pandas as pd
defs = {
'r9.4': {
'ed_params': {
'window_lengths': [3, 6], 'thresholds': [1.4, 1.1],
'peak_height': 0.2
}
},
'r9': {
'ed_params': {
'window_lengths': [6, 12], 'thresholds': [2.0, 1.1],
'peak_height': 1.2
}
},
'r9.5b': {
'ed_params': {
'window_lengths': [4, 5], 'thresholds': [1.4, 1.0],
'peak_height': 1
}
},
'r9.5': {
'ed_params': {
'window_lengths': [4, 6], 'thresholds': [1.4, 1.0],
'peak_height': 0.65
}
},
'rf': {
'ed_params': {
'window_lengths': [4, 6], 'thresholds': [1.4, 1.1], # [1.4, 1.1],
'peak_height': 1.2 # 1.2
}
}
}
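# Per-chemistry event-detection parameters (detector window lengths, thresholds
# and minimum peak height). These are presumably consumed by `extract_events`
# when a read has to be re-segmented from its raw signal.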
def smooth(ser, sc):
return np.array(pd.Series(ser).rolling(sc, min_periods=1, center=True).mean())
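# Example: smooth([1, 2, 9, 2, 1], 3) -> [1.5, 4.0, 4.33, 4.0, 1.5], i.e. a
# centred rolling mean whose window shrinks at both edges (min_periods=1).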
def find_raw(raw, maxi=1000, safe=10):
m = raw
d2 = smooth(np.sqrt((m[1:]-m[:-1])**2), 1000)
return min(np.argmax(d2[safe:] > 3)+safe, len(m)-1), min(len(d2)-np.argmax(d2[::-1] > 3)+safe, len(m)-1)
def get_events(h5, already_detected=True, chemistry="r9.5", window_size=None,
old=True, verbose=True, about_max_len=None, extra=False, tomb=False, bigf=False):
if tomb:
if bigf:
racine = "BaseCalled_template/"
else:
racine = "Analyses/RawGenomeCorrected_000/BaseCalled_template/"
try:
# print(h5.filename)
# print()
if bigf:
if "Event" in h5[racine].keys():
e = h5[racine+"Event"]
else:
e = h5[racine + "Events"]
else:
e = h5[racine+"Events"]
# print(e)
except:
"""
def printname(name, two=""):
print(name, two)
try:
print(name, name.value)
except:
pass
print("failed")
print(h5[racine])
h5[racine].visit(printname)
"""
return None, None, None
#e = list(e)
return {"mean": e["norm_mean"], "bases": np.array(e["base"], dtype=str)},\
dict(h5[racine].attrs),\
dict(h5[racine+"/Alignment"].attrs)
if already_detected:
try:
e = h5["Analyses/Basecall_RNN_1D_000/BaseCalled_template/Events"]
return e
except:
pass
try:
e = h5["Analyses/Basecall_1D_000/BaseCalled_template/Events"]
return e
except:
pass
else:
try:
if not extra:
return h5["Segmentation_Rep/events"]
else:
raw, sl = get_raw(h5)
s, e = find_raw(raw)
return h5["Segmentation_Rep/events"], raw[s:e], sl
except:
# print("la")
return extract_events(h5, chemistry, window_size,
old=old, verbose=verbose, about_max_len=about_max_len)
def find2(event, maxi=1000, safe=10):
m = event["mean"]
d2 = np.sqrt((m[1:]-m[:-1])**2)
return min(np.argmax(d2[safe:] > 30)+safe, len(m)-1), min(len(d2)-np.argmax(d2[::-1] > 30)+safe, len(m)-1)
def scale_ratio(x):
x -= np.percentile(x, 25)
#scale = np.percentile(x, 75) - np.percentile(x, 25)
# print(scale,np.percentile(x, 75) , np.percentile(x, 25))
#x /= scale
x /= 20
if np.sum(x > 10) > len(x) * 0.05:
print("Warning lotl of rare events")
print(np.sum(x > 10 * scale), len(x))
x[x > 5] = 0
x[x < -5] = 0
return x
def get_raw(h5):
# print(h5["Raw/Reads"].keys())
rk = list(h5["Raw/Reads"].keys())[0]
raw = h5["Raw/Reads"][rk]["Signal"]
# print(list(h5["Raw/Reads"].keys()))
meta = h5["UniqueGlobalKey/channel_id"].attrs
offset = meta["offset"]
raw_unit = meta['range'] / meta['digitisation']
raw = (raw + offset) * raw_unit
sl = meta["sampling_rate"]
# print(tracking
return raw, sl
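# Hedged usage sketch (not part of the pipeline): the group names above follow
# the single-read fast5 layout, so for such files something like the following
# is expected to work (h5py and the file name are assumptions):
#
#   import h5py
#   with h5py.File("read.fast5", "r") as h5:
#       raw, sampling_rate = get_raw(h5)  # current in pA, samples per second
#       start, end = find_raw(raw)        # crop open-pore signal at both ends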
def find_stall_old(events, threshold):
count_above = 0
start_ev_ind = 0
for ev_ind, event in enumerate(events[:100]):
if event['mean'] <= threshold:
count_above = 0
else:
count_above += 1
if count_above == 2:
start_ev_ind = ev_ind - 1
break
new_start = 0
count = 0
for idx in range(start_ev_ind, len(events)):
if events['mean'][idx] > threshold:
count = 0
else:
count += 1
if count == 3:
new_start = idx - 2
break
return new_start
def find_stall(events, start_threshold, end_threshold, raw, sampling_rate, max_under_threshold=10):
std = | pd.Series(raw) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Converting output files from "11c - Electric Futures Simulations BIFACIAL (PVSC) CLEANUP"
# ## into OpenEi format for the various graphs shown on the PVSC PVICE wiki page
# In[8]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
from pathlib import Path
# In[9]:
testfolder = str(Path().resolve().parent.parent /'PV_ICE' / 'TEMP')
baselinesfolder = str(Path().resolve().parent.parent /'PV_ICE' / 'baselines')
# Another option using relative address; for some operative systems you might need '/' instead of '\'
# testfolder = os.path.abspath(r'..\..\PV_ICE\TEMP')
print ("Your simulation will be stored in %s" % testfolder)
print(baselinesfolder)
# In[11]:
yearly_results = pd.read_csv(r'C:\Users\ahegedus\Documents\PV ICE Open EI Data\PVSC PVICE Python Data Files to Convert to Open EI\CONVERT FROM THESE\Yearly_Results.csv')
cumulative_results = pd.read_csv(r'C:\Users\ahegedus\Documents\PV ICE Open EI Data\PVSC PVICE Python Data Files to Convert to Open EI\CONVERT FROM THESE\PVSC_Cumulative_Results.csv')
# ## Create "PVSC_Yearly, with Source Comparison, Materials Summed.csv"
# In[12]:
yearly_source_comparison = pd.DataFrame()
# In[13]:
scenario_col = ["Today"]*(2050-1994)+["Bifacial Projection"]*(2050-1994)
yearly_source_comparison['@scenario|Module Composition Scenario'] = scenario_col
# In[14]:
yearly_source_comparison['@timeseries|Year'] = list(yearly_results['year'])*2
# ### Total Virgin Material Demand Columns
# In[15]:
virgin_material_demand_PVICE_bireduced = yearly_results['VirginStock_glass_Bifacial_ReducedInstalls'] + yearly_results['VirginStock_aluminium_frames_Bifacial_ReducedInstalls'] + yearly_results['VirginStock_silver_Bifacial_ReducedInstalls'] + yearly_results['VirginStock_silicon_Bifacial_ReducedInstalls'] + yearly_results['VirginStock_copper_Bifacial_ReducedInstalls']
yearly_source_comparison['@value|TotalVirginMaterialDemand|PV ICE Bifacial Reduced Installs#MetricTonnes'] = ["NA"]*(2050-1994) + list(virgin_material_demand_PVICE_bireduced.values)
# In[16]:
virgin_material_demand_PVICE_bi = yearly_results['VirginStock_glass_Bifacial_SameInstalls'] + yearly_results['VirginStock_aluminium_frames_Bifacial_SameInstalls'] + yearly_results['VirginStock_silver_Bifacial_SameInstalls'] + yearly_results['VirginStock_silicon_Bifacial_SameInstalls'] + yearly_results['VirginStock_copper_Bifacial_SameInstalls']
yearly_source_comparison['@value|TotalVirginMaterialDemand|PV ICE Bifacial#MetricTonnes'] = ["NA"]*(2050-1994) + list(virgin_material_demand_PVICE_bi.values)
# In[17]:
virgin_material_demand_PVICE_today = yearly_results['VirginStock_glass_PV_ICE_Today'] + yearly_results['VirginStock_aluminium_frames_PV_ICE_Today'] + yearly_results['VirginStock_silver_PV_ICE_Today'] + yearly_results['VirginStock_silicon_PV_ICE_Today'] + yearly_results['VirginStock_copper_PV_ICE_Today']
virgin_material_demand_PVICE_bifacialproj = yearly_results['VirginStock_glass_PV_ICE_Bifacial'] + yearly_results['VirginStock_aluminium_frames_PV_ICE_Bifacial'] + yearly_results['VirginStock_silver_PV_ICE_Bifacial'] + yearly_results['VirginStock_silicon_PV_ICE_Bifacial'] + yearly_results['VirginStock_copper_PV_ICE_Bifacial']
yearly_source_comparison['@value|TotalVirginMaterialDemand|PV ICE#MetricTonnes'] = list(virgin_material_demand_PVICE_today.values) + list(virgin_material_demand_PVICE_bifacialproj.values)
# In[18]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
#virgin material demand cols for lit_sources
for source in lit_sources:
virgin_material_demand_today = yearly_results['VirginStock_glass_' + source + '_Today'] + yearly_results['VirginStock_aluminium_frames_' + source + '_Today'] + yearly_results['VirginStock_silver_' + source + '_Today'] + yearly_results['VirginStock_silicon_' + source + '_Today'] + yearly_results['VirginStock_copper_' + source + '_Today']
virgin_material_demand_bifacialproj = yearly_results['VirginStock_glass_' + source + '_Bifacial'] + yearly_results['VirginStock_aluminium_frames_' + source + '_Bifacial'] + yearly_results['VirginStock_silver_' + source + '_Bifacial'] + yearly_results['VirginStock_silicon_' + source + '_Bifacial'] + yearly_results['VirginStock_copper_' + source + '_Bifacial']
better_source_name = pretty_sources[lit_sources.index(source)]
yearly_source_comparison['@value|TotalVirginMaterialDemand|' + better_source_name + '#MetricTonnes'] = list(virgin_material_demand_today.values) + list(virgin_material_demand_bifacialproj.values)
# ### Total EOL Material Columns
# In[19]:
bifacial_scenarios = ["Bifacial_ReducedInstalls","Bifacial_SameInstalls"]
pretty_scenarios = ["PV ICE Bifacial Reduced Installs", "PV ICE Bifacial"]
for myscenario in bifacial_scenarios:
total_eol_material_bifacialproj = yearly_results['Waste_EOL_glass_' + myscenario] + yearly_results['Waste_EOL_aluminium_frames_' + myscenario] + yearly_results['Waste_EOL_silver_' + myscenario] + yearly_results['Waste_EOL_silicon_' + myscenario] + yearly_results['Waste_EOL_copper_' + myscenario]
better_scenario_name = pretty_scenarios[bifacial_scenarios.index(myscenario)]
yearly_source_comparison['@value|TotalEOLMaterial|' + better_scenario_name + '#MetricTonnes'] = ["NA"]*(2050-1994) + list(total_eol_material_bifacialproj.values)
# In[20]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
for source in lit_sources:
total_eol_today = yearly_results['Waste_EOL_glass_' + source + '_Today'] + yearly_results['Waste_EOL_aluminium_frames_' + source + '_Today'] + yearly_results['Waste_EOL_silver_' + source + '_Today'] + yearly_results['Waste_EOL_silicon_' + source + '_Today'] + yearly_results['Waste_EOL_copper_' + source + '_Today']
total_eol_bifacialproj = yearly_results['Waste_EOL_glass_' + source + '_Bifacial'] + yearly_results['Waste_EOL_aluminium_frames_' + source + '_Bifacial'] + yearly_results['Waste_EOL_silver_' + source + '_Bifacial'] + yearly_results['Waste_EOL_silicon_' + source + '_Bifacial'] + yearly_results['Waste_EOL_copper_' + source + '_Bifacial']
better_source_name = pretty_sources[lit_sources.index(source)]
yearly_source_comparison['@value|TotalEOLMaterial|' + better_source_name + '#MetricTonnes'] = list(total_eol_today.values) + list(total_eol_bifacialproj.values)
# ### Manufacturing Scrap Columns
# In[21]:
bifacial_scenarios = ["Bifacial_ReducedInstalls","Bifacial_SameInstalls"]
pretty_scenarios = ["PV ICE Bifacial Reduced Installs", "PV ICE Bifacial"]
for myscenario in bifacial_scenarios:
total_mfg_scrap_bifacialproj = yearly_results['Waste_MFG_glass_' + myscenario] + yearly_results['Waste_MFG_aluminium_frames_' + myscenario] + yearly_results['Waste_MFG_silver_' + myscenario] + yearly_results['Waste_MFG_silicon_' + myscenario] + yearly_results['Waste_MFG_copper_' + myscenario]
better_scenario_name = pretty_scenarios[bifacial_scenarios.index(myscenario)]
yearly_source_comparison['@value|ManufacturingScrap|' + better_scenario_name + '#MetricTonnes'] = ["NA"]*(2050-1994) + list(total_mfg_scrap_bifacialproj.values)
# In[22]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
for source in lit_sources:
total_mfg_today = yearly_results['Waste_MFG_glass_' + source + '_Today'] + yearly_results['Waste_MFG_aluminium_frames_' + source + '_Today'] + yearly_results['Waste_MFG_silver_' + source + '_Today'] + yearly_results['Waste_MFG_silicon_' + source + '_Today'] + yearly_results['Waste_MFG_copper_' + source + '_Today']
total_mfg_bifacialproj = yearly_results['Waste_MFG_glass_' + source + '_Bifacial'] + yearly_results['Waste_MFG_aluminium_frames_' + source + '_Bifacial'] + yearly_results['Waste_MFG_silver_' + source + '_Bifacial'] + yearly_results['Waste_MFG_silicon_' + source + '_Bifacial'] + yearly_results['Waste_MFG_copper_' + source + '_Bifacial']
better_source_name = pretty_sources[lit_sources.index(source)]
yearly_source_comparison['@value|ManufacturingScrap|' + better_source_name + '#MetricTonnes'] = list(total_mfg_today.values) + list(total_mfg_bifacialproj.values)
# ### Manufacturing Scrap and EOL Material Columns
# In[23]:
bifacial_scenarios = ["Bifacial_ReducedInstalls","Bifacial_SameInstalls"]
pretty_scenarios = ["PV ICE Bifacial Reduced Installs", "PV ICE Bifacial"]
for myscenario in bifacial_scenarios:
better_scenario_name = pretty_scenarios[bifacial_scenarios.index(myscenario)]
total_waste = yearly_source_comparison['@value|TotalEOLMaterial|' + better_scenario_name + '#MetricTonnes'] + yearly_source_comparison['@value|ManufacturingScrap|' + better_scenario_name + '#MetricTonnes']
yearly_source_comparison['@value|ManufacturingScrapAndEOLMaterial|' + better_scenario_name + '#MetricTonnes'] = ["NA"]*(2050-1994) + list(total_waste.values)[56:]
# In[24]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
for source in lit_sources:
better_source_name = pretty_sources[lit_sources.index(source)]
total_waste = yearly_source_comparison['@value|TotalEOLMaterial|' + better_source_name + '#MetricTonnes'] + yearly_source_comparison['@value|ManufacturingScrap|' + better_source_name + '#MetricTonnes']
yearly_source_comparison['@value|ManufacturingScrapAndEOLMaterial|' + better_source_name + '#MetricTonnes'] = list(total_waste.values)
# ### New Installed Capacity Columns
# In[25]:
bifacial_scenarios = ["Bifacial_ReducedInstalls","Bifacial_SameInstalls"]
pretty_scenarios = ["PV ICE Bifacial Reduced Installs", "PV ICE Bifacial"]
for myscenario in bifacial_scenarios:
new_installs = yearly_results['new_Installed_Capacity_[MW]_' + myscenario]
better_scenario_name = pretty_scenarios[bifacial_scenarios.index(myscenario)]
yearly_source_comparison['@value|NewInstalledCapacity|' + better_scenario_name + '#MW'] = ["NA"]*(2050-1994) + list(new_installs.values)
# In[26]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
for source in lit_sources:
new_installs = yearly_results['new_Installed_Capacity_[MW]_' + source]
better_source_name = pretty_sources[lit_sources.index(source)]
yearly_source_comparison['@value|NewInstalledCapacity|' + better_source_name + '#MW'] = list(new_installs.values) * 2
# ### Installed Capacity Columns
# In[27]:
bifacial_scenarios = ["Bifacial_ReducedInstalls","Bifacial_SameInstalls"]
pretty_scenarios = ["PV ICE Bifacial Reduced Installs", "PV ICE Bifacial"]
for myscenario in bifacial_scenarios:
capacity = cumulative_results['Capacity_' + myscenario]
better_scenario_name = pretty_scenarios[bifacial_scenarios.index(myscenario)]
yearly_source_comparison['@value|InstalledCapacity|' + better_scenario_name + '#MW'] = ["NA"]*(2050-1994) + list(capacity.values/1000000) #convert to MW
# In[28]:
lit_sources = ["PV_ICE","Irena_EL","Irena_RL"]
pretty_sources = ['PV ICE','Irena EL','Irena RL']
for source in lit_sources:
capacity_today = cumulative_results['Capacity_' + source + '_Today']
capacity_bifacialproj = cumulative_results['Capacity_' + source + '_Bifacial']
better_source_name = pretty_sources[lit_sources.index(source)]
yearly_source_comparison['@value|InstalledCapacity|' + better_source_name + '#MW'] = list(capacity_today.values) + list(capacity_bifacialproj.values/1000000) #convert to MW
# In[29]:
yearly_source_comparison['@value|InstalledCapacity|Cumulative New Installs#MW'] = cumulative_results['new_Installed_Capacity_[MW]_PV_ICE']
# In[30]:
### Save results as CSV, saves in tutorial folder
yearly_source_comparison.to_csv('New_PVSC_Yearly, with Source Comparison, Materials Summed.csv')
# ## Create "PVSC_Installed Capacity.csv"
# ### Using Capacity_Today values
# In[31]:
installed_capacity = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
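    # Note: the accumulation loop above computes the usual sample standard
    # deviation, i.e. roughly numpy.std(data, ddof=1) evaluated in float128.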
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
            (meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
                dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
            # When the cycles are negative or zero we missed cleaning up a row.
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": | pandas.StringDtype() | pandas.StringDtype |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
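# Minimal sketch (not used by the pipeline): realized volatility of a toy WAP
# series, wiring together log_return and realized_volatility defined above.
def _demo_realized_volatility():
    toy_wap = pd.Series([100.0, 100.5, 100.2, 100.8])
    returns = log_return(toy_wap).dropna()  # drop the leading NaN from diff()
    return realized_volatility(returns)     # sqrt of the sum of squared log returns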
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
    # Local import so the rest of the module works without the optional `arch` dependency.
    from arch import arch_model
    model = arch_model(returns_series * N, p=1, q=1)
    model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
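# Both entropy helpers above resample the WAP onto a regular grid (one point per
# second in the bucket) with nearest-neighbour interpolation before calling
# sampen, so entropy is computed on evenly spaced samples rather than on
# irregular book updates.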
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2']) # Abs to take
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
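# Order of the returned list: level-1 vs level-2 WAP gap, relative bid-ask
# spread, bid-side and ask-side level-1/level-2 price gaps, total book volume
# and absolute ask/bid volume imbalance (each averaged over the time_id window).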
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
    return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
            wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
            wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
            wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
            wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_5'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_5'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0], 1)), columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = | pd.DataFrame([0],columns=['entropy']) | pandas.DataFrame |
import json
import pathlib
import altair as alt
import pandas as pd
import rbo
import streamlit as st
from tinydb import TinyDB, Query
DL = "https://github.com/The57thPick/nba/releases/download/{year}-media-awards/{year}.zip"
DB = TinyDB("db/db.json")
YEARS = [
2015,
2016,
2017,
2018,
2019,
2020,
2021,
]
Q = Query()
def truncate(df, col, limit):
if limit >= len(df):
limit = len(df) - 1
return df.sort_values(col)[0:limit]
def award_box(year: str, award: str):
""" """
with open("db/ranks.json") as f:
ranks = json.load(f)
data = ranks[award]
return | pd.DataFrame(data, columns=["stat", "rank", "player", "year"]) | pandas.DataFrame |
"""Tests for the sdv.constraints.tabular module."""
import pandas as pd
from sdv.constraints.tabular import (
ColumnFormula, CustomConstraint, GreaterThan, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._colums == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test__valid_separator_valid(self):
"""Test ``_valid_separator`` for a valid separator.
If the separator and data are valid, result is ``True``.
Input:
- Table data (pandas.DataFrame)
Output:
- True (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert is_valid
def test__valid_separator_non_valid_separator_contained(self):
"""Test ``_valid_separator`` passing a column that contains the separator.
If any of the columns contains the separator string, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column that contains the separator string ('#')
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', '#', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test__valid_separator_non_valid_name_joined_exists(self):
"""Test ``_valid_separator`` passing a column whose name is obtained after joining
the column names using the separator.
If the column name obtained after joining the column names using the separator
already exists, result is ``False``.
Input:
- Table data (pandas.DataFrame) with a column name that will be obtained by joining
the column names and the separator.
Output:
- False (bool).
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance._separator = '#'
# Run
table_data = pd.DataFrame({
'b#c': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
is_valid = instance._valid_separator(table_data)
# Assert
assert not is_valid
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = set(table_data[columns].itertuples(index=False))
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
assert instance._combinations == expected_combinations
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
pd.testing.assert_frame_equal(expected_out, out)
    def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
transformed_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b#c': ['d#g', 'e#h', 'f#i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(transformed_data)
# Run
out = instance.reverse_transform(transformed_data)
# Assert
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
It is expected to create a new Constraint instance and receiving ``low`` and ``high``,
names of the columns that contain the low and high value.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
def test___init___strict_true(self):
"""Test the ``GreaterThan.__init__`` method.
It is expected to create a new Constraint instance and receiving ``low`` and ``high``,
names of the columns that contain the low and high value. It also receives ``strict``,
a bool that indicates the comparison of the values should be strict.
Input:
- low = 'a'
- high = 'b'
- strict = True
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
def test_fit(self):
"""Test the ``GreaterThan.fit`` method.
It is expected to return the dtype of the ``high`` column.
Input:
- Table data (pandas.DataFrame)
Output:
- dtype of the ``high`` column.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
expected = table_data['b'].dtype
assert instance._dtype == expected
def test_is_valid_true_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are valid
and the comparison is strict.
If the columns satisfy the constraint, result is a series of ``True`` values.
Input:
- Table data, where the values of the ``low`` column are lower
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are not valid
and the comparison is strict.
        If the columns do not satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data, where the values of the ``low`` column are higher or equal
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [1, 1, 1],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_true_not_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are valid
and the comparison is not strict.
If the columns satisfy the constraint, result is a series of ``True`` values.
Input:
- Table data, where the values of the ``low`` column are lower or equal
than the values of the ``high`` column (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 3],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false_not_strict(self):
"""Test the ``GreaterThan.is_valid`` method when the column values are not valid
and the comparison is not strict.
        If the columns do not satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data, where the values of the ``low`` column are higher
than the values of the ``high`` column (pandas.DataFrame)
Output:
        - Series of ``False`` values (pandas.Series)
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [0, 1, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``GreaterThan.transform`` method.
The ``GreaterThan.transform`` method is expected to:
- Transform the original table data.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed (pandas.DataFrame)
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [1.3862944, 1.3862944, 1.3862944]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform(self):
"""Test the ``GreaterThan.reverse_transform`` method.
The ``GreaterThan.reverse_transform`` method is expected to:
- Return the original table data.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Table data (pandas.DataFrame)
Side effects:
- Since ``reverse_transform`` uses the class variable ``_dtype``, the ``fit`` method
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance = GreaterThan(low='a', high='b', strict=True)
instance.fit(table_data)
# Run
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [55, 149, 405],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
| pd.testing.assert_series_equal(expected_out, out) | pandas.testing.assert_series_equal |
import pandas as pd
import numpy as np
import tensorflow as tf
import datetime
import pickle
import math
from create_features import Features
from binance import client
class Trader(client.Client):
"""
This class adds functionalities to perform trades in Binance.
It requires the api and secret key from binance.
"""
def __init__(self, api_key, secret_key):
super().__init__(api_key, secret_key)
def load_model(self, path):
"""
Loads a pre-trained tensorflow model
Args:
path: str. the model's path
returns:
model: tensorflow model
"""
model = tf.keras.models.load_model(path)
return model
def load_scaler(self, path):
"""
Loads a serialized sklearn scaler object. Normally
StandardScaler() or MinMaxScaler()
Args:
path: str. The scaler's path
returns:
scaler: sklearn scaler object
"""
with open(path, "rb") as f:
scaler = pickle.load(f)
return scaler
def save_scaler(self, path, scaler):
"""
Serializes a sklearn scaler object.
Args:
path: str. Where to save the scaler
scaler: the sklearn scaler object used to
transform the model's data
"""
with open(path, "wb") as f:
pickle.dump(scaler, f)
def timestamp2date(self, timestamp):
"""
converts the binance timestamp (miliseconds) to a
normal date
"""
correct_date = datetime.datetime.fromtimestamp(timestamp / 1000)
return correct_date
def format_df(self, df):
"""
Takes the raw klines from binance and adds them the correct
column names. It also keeps the most relevant columns
and transforms the string values to numeric values.
Args:
df: pandas DataFrame. It contains the raw klines from binance
returns:
df_copy: pandas DataFrame. It contains correct column names
and correct dtypes.
"""
df_copy = df.copy()
# These are the column names from binance api data
columns = [
"open_time",
"open",
"high",
"low",
"close",
"volume",
"close_time",
"quote_asset_volume",
"number_of_trades",
"taker_buy_base_asset_volume",
"taker_buy_quote_asset_volume",
"ignore",
]
df_copy.columns = columns
# Columns that we are going to keep
keep = [
"open_time",
"open",
"high",
"low",
"close",
"volume",
"number_of_trades",
]
df_copy = df_copy[keep]
# Change the timestamp values to real dates.
df_copy["open_time"] = df_copy.open_time.apply(self.timestamp2date)
df_copy["open_time"] = pd.to_datetime(df_copy["open_time"])
# Change dtypes
for column in keep[1:]:
df_copy[column] = pd.to_numeric(df_copy[column])
return df_copy
def add_features(self, df, emas, volume_emas, indicators=True, patterns=True):
"""
Creates new features using the Feature class.
We can add just technical indicators or candle
patterns or both.
Args:
df: pandas dataframe (columns: open_time, open, high,
low, close, volume, number_of_trades)
emas: list of int. With the different ema periods
volume_emas: list of int. With the different ema periods
indicators: bool
patterns: bool
returns:
df_copy: pandas dataframe. Contains the initial columns
plus the new
"""
# instantiate the features object
features = Features()
df_copy = df.copy()
if indicators:
df_copy = features.handle_dates(df_copy)
df_copy = features.add_rsi(df_copy)
df_copy = features.add_macd(df_copy)
df_copy = features.add_apo(df_copy)
df_copy = features.add_ema(df_copy, emas)
df_copy = features.add_volume_ema(df_copy, volume_emas)
df_copy = features.add_bbands(df_copy)
df_copy = features.add_psar(df_copy)
if patterns:
df_copy = features.three_inside_up_down(df_copy)
df_copy = features.three_line_strike(df_copy)
df_copy = features.three_stars_south(df_copy)
df_copy = features.advancing_white_soldiers(df_copy)
df_copy = features.belt_hold(df_copy)
df_copy = features.breakaway(df_copy)
df_copy = features.closing_marubozu(df_copy)
df_copy = features.counteratack(df_copy)
df_copy = features.doji_star(df_copy)
df_copy = features.dragonfly_doji(df_copy)
df_copy = features.engulfing(df_copy)
df_copy = features.gravestone_doji(df_copy)
df_copy = features.hammer(df_copy)
df_copy = features.hanging_man(df_copy)
df_copy = features.inverted_hammer(df_copy)
df_copy = features.matching_low(df_copy)
df_copy = features.morning_doji_star(df_copy)
df_copy = features.separating_lines(df_copy)
df_copy = features.shooting_star(df_copy)
df_copy = features.unique3river(df_copy)
# delete columns with na. Normally for emas calculations
df_copy = df_copy.dropna()
df_copy = df_copy.reset_index(drop=True)
return df_copy
def create_target(self, df, lag=1):
"""
Calculates the log returns using the requested lag.
It also creates a signal column that is used for
classification.
Args:
df: pandas dataframe
lag: int
returns:
df_copy: pandas dataframe. The initial dataframe
plus two new columns returns_lag, signal_lag
"""
df_copy = df.copy()
df_copy[f"returns_{lag}"] = np.log(df_copy.close / df_copy.close.shift(lag))
df_copy[f"signal_{lag}"] = np.where(df_copy[f"returns_{lag}"] > 0, 1, 0)
# Check for dropna
df_copy.dropna(inplace=True)
return df_copy
def create_splits(self, df, lag=1, pct_split=0.95, scaler=None):
"""
Creates the training and validation splits for training
the model.
Args:
df: pandas dataframe. Dataframe with the final columns
to train
lag: int. To retrieve the correct targets
pct_split: float. Train percent to keep
scaler: sklearn.preprocessing. A scaler to normalize the data
helpful for neural nets.
returns:
train: pandas dataframe. X_train data
test: pandas dataframe. X_valid data
train_targets: pandas dataframe. y_train data
test_targets: pandas dataframe. y_valid data
scaler: sklearn.preprocessing. Scaler used later for inverse transforme
the data.
"""
df_copy = df.copy()
# Firts separate the target
# also add open_time for backtesting
target_columns = [f"returns_{lag}", f"signal_{lag}", "open_time"]
targets = df_copy[target_columns]
df_copy.drop(target_columns, axis=1, inplace=True)
columns = df_copy.columns
split = int(len(df_copy) * pct_split)
train = df_copy.iloc[:split]
train_targets = targets.iloc[:split]
test = df_copy.iloc[split:]
test_targets = targets.iloc[split:]
if scaler:
train = scaler.fit_transform(train)
test = scaler.transform(test)
train = pd.DataFrame(train, columns=columns)
test = pd.DataFrame(test, columns=columns)
print(f"train shape: {train.shape}")
print(f"test shape: {test.shape}")
return train, test, train_targets, test_targets, scaler
def get_one_prediction_data(self, emas, volume_emas, num_candles=1, **kwargs):
"""
It gets the last LIMIT prices and adds the features. Then we return
the resulting dataframe to be scaled.
Args:
emas: list. a list of int with the periods for the emas
volume_emas: list. a list of int with the periods for the volume emas
num_candles: int. the number of candles or rows that we return
returns:
data: pandas DataFrame. The candles to make predictions. There are not scaled
date: pandas Series. The corresponding datetime for the data.
"""
candles = self.get_klines(**kwargs)
df = pd.DataFrame(candles)
df = self.format_df(df)
df = self.add_features(df, emas, volume_emas)
# Delete the last row because is the firts seconds of information
# of the candle that we want to predict
df.drop(df.tail(1).index, inplace=True)
# We remove open_time because it is not use for prediction
open_time = df["open_time"]
df.drop("open_time", axis=1, inplace=True)
data = df.tail(num_candles)
date = open_time.tail(num_candles)
return data, date
def model_data(
self, emas, volume_emas, lag, interval, start_date, scaler, symbol="BTCUSDT"
):
"""
It creates the datasets for training a ml model
Args:
emas: list. a list of int with the periods for the emas
volume_emas: list. a list of int with the periods for the volume emas
lag: int. the price to predict. If 1 it means the next candle
interval: str. klines interval
start_date: str.
scaler: sklearn scaler object. Normally StandardScaler or MinMaxScaler
symbol: str.
returns:
X_train: pandas dataframe. Features for training
X_valid: pandas dataframe. Features for validation
y_train: pandas dataframe. Targets for training
y_valid: pandas dataframe. Targets for validation
scaler: sklean scaler. Ready to transform new data
"""
candles = self.get_historical_klines(symbol, interval, start_date)
df = | pd.DataFrame(candles) | pandas.DataFrame |
#!C:\Users\RIchardC\Documents\digitizePlots\venv\Scripts\python.exe
# Create Lyman/Fitz style long flat Design Files from plain-text onset files
# EKK / June 2015
# Python 2/3 compatibile, depends on Pandas and Numpy/Scipy
from __future__ import print_function
from pandas import concat, read_csv
from argparse import ArgumentParser, FileType
from numpy import empty
def main(args):
runs_df = load_onsets(args.onsets_files, args)
print("Saving designfile (%d rows) to %s" % (runs_df.shape[0], args.out))
runs_df.to_csv(args.out, index=False)
def load_onsets(onsets_files, args):
"""Read onsets file and add metadata from their filenames.
Return one concatenated pandas dataframe with all trials as rows."""
runs = []
for i, fid in enumerate(onsets_files):
run = | read_csv(fid) | pandas.read_csv |
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
"""
The Built In Functions module contains preinstalled functions
"""
import itertools as it
import datetime as dt
import importlib
import logging
import time
import numpy as np
import pandas as pd
import scipy as sp
from pyod.models.cblof import CBLOF
import ruptures as rpt
# for Spectral Analysis
from scipy import signal, fftpack
import skimage as ski
from skimage import util as skiutil # for nifty windowing
# for KMeans
from sklearn import ensemble
from sklearn import linear_model
from sklearn import metrics
from sklearn.covariance import MinCovDet
from sklearn.neighbors import (KernelDensity, LocalOutlierFactor)
from sklearn.pipeline import Pipeline, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import (StandardScaler, RobustScaler, MinMaxScaler,
minmax_scale, PolynomialFeatures)
from sklearn.utils import check_array
# for Matrix Profile
import stumpy
# for KDEAnomalyScorer
import statsmodels.api as sm
from statsmodels.nonparametric.kernel_density import KDEMultivariate
from statsmodels.tsa.arima.model import ARIMA
# EXCLUDED until we upgrade to statsmodels 0.12
#from statsmodels.tsa.forecasting.stl import STLForecast
from .base import (BaseTransformer, BaseRegressor, BaseEstimatorFunction, BaseSimpleAggregator)
from .bif import (AlertHighValue)
from .ui import (UISingle, UIMulti, UIMultiItem, UIFunctionOutSingle, UISingleItem, UIFunctionOutMulti)
# VAE
import torch
import torch.autograd
import torch.nn as nn
logger = logging.getLogger(__name__)
try:
# for gradient boosting
import lightgbm
except (AttributeError, ImportError):
logger.exception('')
    logger.debug(f'Could not import lightgbm package. Might have issues when using GBMRegressor catalog function')
PACKAGE_URL = 'git+https://github.com/ibm-watson-iot/functions.git@'
_IS_PREINSTALLED = True
Error_SmallWindowsize = 0.0001
Error_Generic = 0.0002
FrequencySplit = 0.3
DefaultWindowSize = 12
SmallEnergy = 1e-20
KMeans_normalizer = 1
Spectral_normalizer = 100 / 2.8
FFT_normalizer = 1
Saliency_normalizer = 1
Generalized_normalizer = 1 / 300
# from
# https://stackoverflow.com/questions/44790072/sliding-window-on-time-series-data
def view_as_windows1(temperature, length, step):
logger.info('VIEW ' + str(temperature.shape) + ' ' + str(length) + ' ' + str(step))
def moving_window(x, length, _step=1):
        if not isinstance(_step, int) or _step < 1:
logger.info('MOVE ' + str(_step))
_step = 1
streams = it.tee(x, length)
return zip(*[it.islice(stream, i, None, _step) for stream, i in zip(streams, it.count(step=1))])
x_ = list(moving_window(temperature, length, step))
return np.asarray(x_)
def view_as_windows(temperature, length, step):
return skiutil.view_as_windows(temperature, window_shape=(length,), step=step)
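# Illustrative sketch (not part of the original module): shows the window layout produced
# by the view_as_windows wrapper above on a short synthetic series. The numbers are
# example values only.
def _demo_view_as_windows():
    series = np.arange(20, dtype=np.float64)
    # a window of 12 samples moved in steps of 6 yields overlapping slices of shape (2, 12)
    slices = view_as_windows(series, 12, 6)
    print(slices.shape)
    return slices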
def custom_resampler(array_like):
# initialize
if 'gap' not in dir():
gap = 0
if array_like.values.size > 0:
gap = 0
return 0
else:
gap += 1
return gap
def min_delta(df):
# minimal time delta for merging
if df is None:
return pd.Timedelta('5 seconds'), df
elif len(df.index.names) > 1:
df2 = df.reset_index(level=df.index.names[1:], drop=True)
else:
df2 = df
try:
mindelta = df2.index.to_series().diff().min()
except Exception as e:
logger.debug('Min Delta error: ' + str(e))
mindelta = pd.Timedelta('5 seconds')
if mindelta == dt.timedelta(seconds=0) or pd.isnull(mindelta):
mindelta = pd.Timedelta('5 seconds')
return mindelta, df2
def set_window_size_and_overlap(windowsize, trim_value=2 * DefaultWindowSize):
# make sure it exists
if windowsize is None:
windowsize = DefaultWindowSize
# make sure it is positive and not too large
trimmed_ws = np.minimum(np.maximum(windowsize, 1), trim_value)
# overlap
if trimmed_ws == 1:
ws_overlap = 0
else:
# larger overlap - half the window
ws_overlap = trimmed_ws // 2
return trimmed_ws, ws_overlap
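# Minimal sketch (example values only): how the helper above clamps the window size and
# derives the overlap as half the window.
def _demo_window_size_and_overlap():
    print(set_window_size_and_overlap(None))   # window 12, overlap 6 - falls back to DefaultWindowSize
    print(set_window_size_and_overlap(1))      # window 1, overlap 0 - no overlap for single-point windows
    print(set_window_size_and_overlap(1000))   # window 24, overlap 12 - clamped to 2 * DefaultWindowSize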
def dampen_anomaly_score(array, dampening):
if dampening is None:
dampening = 0.9 # gradient dampening
if dampening >= 1:
return array
if dampening < 0.01:
return array
if array.size <= 1:
return array
gradient = np.gradient(array)
# dampened
grad_damp = np.float_power(abs(gradient), dampening) * np.sign(gradient)
# reconstruct (dampened) anomaly score by discrete integration
integral = []
x = array[0]
for x_el in np.nditer(grad_damp):
x = x + x_el
integral.append(x)
# shift array slightly to the right to position anomaly score
array_damp = np.roll(np.asarray(integral), 1)
array_damp[0] = array_damp[1]
# normalize
return array_damp / dampening / 2
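# Illustrative sketch (synthetic data): gradient dampening reduces the height of sharp
# spikes in an anomaly score while keeping their position.
def _demo_dampen_anomaly_score():
    score = np.zeros(50)
    score[25] = 10.0                        # a single sharp spike
    damped = dampen_anomaly_score(score, 0.5)
    print(score.max(), round(float(damped.max()), 2))   # the damped maximum is smaller
    return damped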
# Saliency helper functions
# copied from https://github.com/y-bar/ml-based-anomaly-detection
# remove the boring part from an image resp. time series
def series_filter(values, kernel_size=3):
"""
    Filter a time series. In practice, this computes the mean value inside a window of ``kernel_size``.
    For the mathematical formula, see https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html.
    :param values: one-dimensional array of input values
    :param kernel_size: size of the averaging window
    :return: the filtered (moving average) values
"""
filter_values = np.cumsum(values, dtype=float)
logger.info('SERIES_FILTER: ' + str(values.shape) + ',' + str(filter_values.shape) + ',' + str(kernel_size))
filter_values[kernel_size:] = filter_values[kernel_size:] - filter_values[:-kernel_size]
filter_values[kernel_size:] = filter_values[kernel_size:] / kernel_size
for i in range(1, kernel_size):
filter_values[i] /= i + 1
return filter_values
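# Minimal sketch (example values only) of the moving-average filter above: with
# kernel_size=3 every point is averaged with its predecessors.
def _demo_series_filter():
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    print(series_filter(values, kernel_size=3))   # [1.  1.5 2.  3.  4. ]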
# Saliency class
# see https://www.inf.uni-hamburg.de/en/inst/ab/cv/research/research1-visual-attention.html
class Saliency(object):
def __init__(self, amp_window_size, series_window_size, score_window_size):
self.amp_window_size = amp_window_size
self.series_window_size = series_window_size
self.score_window_size = score_window_size
def transform_saliency_map(self, values):
"""
        Transform a time series into its spectral residual, a method from computer vision.
        For example, see https://docs.opencv.org/master/d8/d65/group__saliency.html
        :param values: a list or numpy array of float values.
        :return: the saliency map (spectral residual mapped back to the time domain)
"""
freq = np.fft.fft(values)
mag = np.sqrt(freq.real ** 2 + freq.imag ** 2)
# remove the boring part of a timeseries
spectral_residual = np.exp(np.log(mag) - series_filter(np.log(mag), self.amp_window_size))
freq.real = freq.real * spectral_residual / mag
freq.imag = freq.imag * spectral_residual / mag
# and apply inverse fourier transform
saliency_map = np.fft.ifft(freq)
return saliency_map
def transform_spectral_residual(self, values):
saliency_map = self.transform_saliency_map(values)
spectral_residual = np.sqrt(saliency_map.real ** 2 + saliency_map.imag ** 2)
return spectral_residual
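# Illustrative sketch (synthetic data, example window sizes): the spectral residual of a
# periodic signal with an injected spike is largest near the spike.
def _demo_saliency():
    t = np.linspace(0, 8 * np.pi, 256)
    values = np.sin(t)
    values[128] += 5.0                                   # inject a spike
    sal = Saliency(amp_window_size=24, series_window_size=24, score_window_size=24)
    residual = sal.transform_spectral_residual(values)
    print(int(np.argmax(residual)))                      # close to the injected spike at 128
    return residual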
def merge_score(dfEntity, dfEntityOrig, column_name, score, mindelta):
"""
Fit interpolated score to original entity slice of the full dataframe
"""
# equip score with time values, make sure it's positive
score[score < 0] = 0
dfEntity[column_name] = score
# merge
dfEntityOrig = pd.merge_asof(dfEntityOrig, dfEntity[column_name], left_index=True, right_index=True,
direction='nearest', tolerance=mindelta)
if column_name + '_y' in dfEntityOrig:
merged_score = dfEntityOrig[column_name + '_y'].to_numpy()
else:
merged_score = dfEntityOrig[column_name].to_numpy()
return merged_score
#######################################################################################
# Scalers
#######################################################################################
class Standard_Scaler(BaseEstimatorFunction):
"""
Learns and applies standard scaling
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['standard_scaler'] = (StandardScaler, self.params)
logger.info('Standard Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.normalize = True # support for optional scaling in subclasses
self.prediction = self.predictions[0] # support for subclasses with univariate focus
self.params = {}
self.whoami = 'Standard_Scaler'
# used by all the anomaly scorers based on it
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data for ' + self.prediction + ' column')
# operate on simple timestamp index
# needed for aggregated data with 3 or more indices
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# interpolate gaps - data imputation
try:
dfe = dfe.interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
temperature = dfe[self.prediction].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
# dummy function for scaler, can be replaced with anomaly functions
def kexecute(self, entity, df_copy):
return df_copy
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
normalize_entity = self.normalize
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
normalize_entity = False
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
# support for optional scaling in subclasses
if normalize_entity:
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
else:
self.prediction = self.features[0]
df_copy = self.kexecute(entity, df_copy)
self.prediction = self.predictions[0]
logger.info('Standard_Scaler: Found columns ' + str(df_copy.columns))
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
class Robust_Scaler(BaseEstimatorFunction):
"""
Learns and applies robust scaling, scaling after outlier removal
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['robust_scaler'] = (RobustScaler, self.params)
logger.info('Robust Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.params = {}
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
# per entity - copy for later inplace operations
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
continue
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
class MinMax_Scaler(BaseEstimatorFunction):
"""
Learns and applies minmax scaling
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
def set_estimators(self):
self.estimators['minmax_scaler'] = (MinMaxScaler, self.params)
logger.info('MinMax Scaler initialized')
def __init__(self, features=None, targets=None, predictions=None):
super().__init__(features=features, targets=targets, predictions=predictions, keep_current_models=True)
# do not run score and call transform instead of predict
self.is_scaler = True
self.experiments_per_execution = 1
self.params = {}
def execute(self, df):
df_copy = df.copy()
entities = np.unique(df_copy.index.levels[0])
logger.debug(str(entities))
missing_cols = [x for x in self.predictions if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
for entity in entities:
try:
check_array(df_copy.loc[[entity]][self.features].values, allow_nd=True)
except Exception as e:
logger.error(
'Found Nan or infinite value in feature columns for entity ' + str(entity) + ' error: ' + str(e))
continue
dfe = super()._execute(df_copy.loc[[entity]], entity)
df_copy.loc[entity, self.predictions] = dfe[self.predictions]
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='features', datatype=float, required=True))
inputs.append(UIMultiItem(name='targets', datatype=float, required=True, output_item='predictions',
is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
return inputs, outputs
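# Minimal sketch (outside of the pipeline classes above): how the three underlying sklearn
# scalers treat the same synthetic column with one outlier. Values are examples only.
def _demo_underlying_scalers():
    data = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])   # 100 is an outlier
    for scaler in (StandardScaler(), RobustScaler(), MinMaxScaler()):
        print(type(scaler).__name__, np.round(scaler.fit_transform(data).ravel(), 2))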
#######################################################################################
# Anomaly Scorers
#######################################################################################
class AnomalyScorer(BaseTransformer):
"""
Superclass of all unsupervised anomaly detection functions.
"""
def __init__(self, input_item, windowsize, output_items):
super().__init__()
logger.debug(input_item)
self.input_item = input_item
# use 12 by default
self.windowsize, self.windowoverlap = set_window_size_and_overlap(windowsize)
# assume 1 per sec for now
self.frame_rate = 1
# step
self.step = self.windowsize - self.windowoverlap
self.output_items = output_items
self.normalize = False
self.whoami = 'Anomaly'
def get_model_name(self, prefix='model', suffix=None):
name = []
if prefix is not None:
name.append(prefix)
name.extend([self._entity_type.name, self.whoami])
name.append(self.output_items[0])
if suffix is not None:
name.append(suffix)
name = '.'.join(name)
return name
# make sure data is evenly spaced
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# interpolate gaps - data imputation
try:
dfe = dfe.dropna(subset=[self.input_item]).interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
temperature = dfe[self.input_item].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
# check data type
if not pd.api.types.is_numeric_dtype(df_copy[self.input_item].dtype):
logger.error('Anomaly scoring on non-numeric feature:' + str(self.input_item))
return df_copy
# set output columns to zero
for output_item in self.output_items:
df_copy[output_item] = 0
# delegate to _calc
logger.debug('Execute ' + self.whoami + ' enter per entity execution')
# group over entities
group_base = [pd.Grouper(axis=0, level=0)]
df_copy = df_copy.groupby(group_base).apply(self._calc)
logger.debug('Scoring done')
return df_copy
def _calc(self, df):
entity = df.index.levels[0][0]
# get rid of entity id as part of the index
df = df.droplevel(0)
# Get new data frame with sorted index
dfe_orig = df.sort_index()
# remove all rows with only null entries
dfe = dfe_orig.dropna(how='all')
# minimal time delta for merging
mindelta, dfe_orig = min_delta(dfe_orig)
logger.debug('Timedelta:' + str(mindelta) + ' Index: ' + str(dfe_orig.index))
# one dimensional time series - named temperature for catchyness
# interpolate gaps - data imputation by default
# for missing data detection we look at the timestamp gradient instead
dfe, temperature = self.prepare_data(dfe)
logger.debug(
self.whoami + ', Entity: ' + str(entity) + ', Input: ' + str(self.input_item) + ', Windowsize: ' + str(
self.windowsize) + ', Output: ' + str(self.output_items) + ', Overlap: ' + str(
self.windowoverlap) + ', Inputsize: ' + str(temperature.size))
if temperature.size <= self.windowsize:
logger.debug(str(temperature.size) + ' <= ' + str(self.windowsize))
for output_item in self.output_items:
dfe[output_item] = Error_SmallWindowsize
else:
logger.debug(str(temperature.size) + str(self.windowsize))
for output_item in self.output_items:
dfe[output_item] = Error_Generic
temperature = self.scale(temperature, entity)
scores = self.score(temperature)
# length of time_series_temperature, signal_energy and ets_zscore is smaller than half the original
# extend it to cover the full original length
logger.debug('->')
try:
for i,output_item in enumerate(self.output_items):
# check for fast path, no interpolation required
diff = temperature.size - scores[i].size
# slow path - interpolate result score to stretch it to the size of the input data
if diff > 0:
dfe[output_item] = 0.0006
time_series_temperature = np.linspace(self.windowsize // 2, temperature.size - self.windowsize // 2 + 1,
temperature.size - diff)
linear_interpolate = sp.interpolate.interp1d(time_series_temperature, scores[i], kind='linear',
fill_value='extrapolate')
zScoreII = merge_score(dfe, dfe_orig, output_item,
abs(linear_interpolate(np.arange(0, temperature.size, 1))), mindelta)
# fast path - either cut off or just copy
elif diff < 0:
zScoreII = scores[i][0:temperature.size]
else:
zScoreII = scores[i]
df[output_item] = zScoreII
except Exception as e:
logger.error(self.whoami + ' score integration failed with ' + str(e))
logger.debug('--->')
return df
def score(self, temperature):
#scores = np.zeros((len(self.output_items), ) + temperature.shape)
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# super simple 1-dimensional z-score
ets_zscore = abs(sp.stats.zscore(temperature))
scores[0] = ets_zscore
# 2nd argument to return the modified input argument (for no data)
if len(self.output_items) > 1:
scores[1] = temperature
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
def scale(self, temperature, entity):
normalize_entity = self.normalize
if not normalize_entity:
return temperature
temp = temperature.reshape(-1, 1)
logger.info(self.whoami + ' scaling ' + str(temperature.shape))
try:
check_array(temp, allow_nd=True)
except Exception as e:
logger.error('Found Nan or infinite value in input data, error: ' + str(e))
return temperature
db = self._entity_type.db
scaler_model = None
# per entity - copy for later inplace operations
model_name = self.get_model_name(suffix=entity)
try:
scaler_model = db.model_store.retrieve_model(model_name)
logger.info('load model %s' % str(scaler_model))
except Exception as e:
logger.error('Model retrieval failed with ' + str(e))
# failed to load a model, so train it
if scaler_model is None:
# all variables should be continuous
scaler_model = StandardScaler().fit(temp)
logger.debug('Created Scaler ' + str(scaler_model))
try:
db.model_store.store_model(model_name, scaler_model)
except Exception as e:
logger.error('Model store failed with ' + str(e))
if scaler_model is not None:
temp = scaler_model.transform(temp)
return temp.reshape(temperature.shape)
return temperature
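# Illustrative sketch (synthetic data) of the base scoring step in AnomalyScorer.score:
# a plain one-dimensional z-score flags values far away from the mean.
def _demo_zscore_scoring():
    from scipy import stats
    temperature = np.array([1.0, 1.1, 0.9, 1.0, 8.0, 1.05, 0.95])
    score = abs(stats.zscore(temperature))
    print(np.round(score, 2))   # the fifth value stands out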
#####
# experimental function to interpolate over larger gaps
####
class Interpolator(AnomalyScorer):
"""
Interpolates NaN and data to be interpreted as NaN (for example 0 as invalid sensor reading)
The window size is typically set large enough to allow for "bridging" gaps
Missing indicates sensor readings to be interpreted as invalid.
"""
def __init__(self, input_item, windowsize, missing, output_item):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.missing = missing
self.whoami = 'Interpolator'
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# remove Nan
dfe = dfe[dfe[self.input_item].notna()]
# remove self.missing
dfe = dfe[dfe[self.input_item] != self.missing]
# interpolate gaps - data imputation
try:
dfe = dfe.interpolate(method="time")
except Exception as e:
logger.error('Prepare data error: ' + str(e))
# one dimensional time series - named temperature for catchyness
        # any remaining NaN values are filled with 0
temperature = dfe[self.input_item].fillna(0).to_numpy(dtype=np.float64)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to interpolate'))
inputs.append(
UISingle(name='windowsize', datatype=int, description='Minimal size of the window for interpolating data.'))
inputs.append(UISingle(name='missing', datatype=int, description='Data to be interpreted as not-a-number.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Interpolated data'))
return (inputs, outputs)
class NoDataAnomalyScoreExt(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Uses z-score AnomalyScorer to find gaps in data.
The function moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, [output_item])
self.whoami = 'NoDataExt'
self.normalizer = 1
logger.debug('NoDataExt')
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names
dfe = dfEntity.reset_index(index_names[1:])
else:
dfe = dfEntity
# count the timedelta in seconds between two events
timeSeq = (dfe.index.values - dfe.index[0].to_datetime64()) / np.timedelta64(1, 's')
#dfe = dfEntity.copy()
# one dimensional time series - named temperature for catchyness
# we look at the gradient of the time series timestamps for anomaly detection
# might throw an exception - we catch it in the super class !!
try:
temperature = np.gradient(timeSeq)
dfe[[self.input_item]] = temperature
except Exception as pe:
logger.info("NoData Gradient failed with " + str(pe))
dfe[[self.input_item]] = 0
temperature = dfe[[self.input_item]].values
temperature[0] = 10 ** 10
temperature = temperature.astype('float64').reshape(-1)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='No data anomaly score'))
return inputs, outputs
class ChangePointDetector(AnomalyScorer):
'''
An unsupervised anomaly detection function.
    Applies change point detection (ruptures BottomUp segmentation) to the time series
    and turns every detected change point into a local score contribution.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, chg_pts):
super().__init__(input_item, windowsize, [chg_pts])
logger.debug(input_item)
self.whoami = 'ChangePointDetector'
def score(self, temperature):
scores = []
sc = np.zeros(temperature.shape)
try:
algo = rpt.BottomUp(model="l2", jump=2).fit(temperature)
chg_pts = algo.predict(n_bkps=15)
for j in chg_pts:
x = np.arange(0, temperature.shape[0], 1)
Gaussian = sp.stats.norm(j-1, temperature.shape[0]/20) # high precision
y = Gaussian.pdf(x) * temperature.shape[0]/8 # max is ~1
sc += y
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
scores.append(sc)
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='chg_pts', datatype=float, description='Change points'))
return inputs, outputs
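# Minimal sketch (synthetic step signal) of the ruptures-based detection used above;
# n_bkps=1 is an example value, the class itself asks for 15 break points.
def _demo_change_points():
    step_signal = np.concatenate([np.zeros(50), np.ones(50) * 5.0])
    algo = rpt.BottomUp(model="l2", jump=2).fit(step_signal)
    print(algo.predict(n_bkps=1))   # [50, 100] - the change point plus the end index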
ENSEMBLE = '_ensemble_'
SPECTRALEXT = 'SpectralAnomalyScoreExt'
class EnsembleAnomalyScore(BaseTransformer):
'''
Call a set of anomaly detectors and return an joint vote along with the individual results
'''
def __init__(self, input_item, windowsize, scorers, thresholds, output_item):
super().__init__()
self.input_item = input_item
self.windowsize = windowsize
self.output_item = output_item
logger.debug(input_item)
self.whoami = 'EnsembleAnomalyScore'
self.list_of_scorers = scorers.split(',')
self.thresholds = list(map(int, thresholds.split(',')))
self.klasses = []
self.instances = []
self.output_items = []
module = importlib.import_module('mmfunctions.anomaly')
for m in self.list_of_scorers:
klass = getattr(module, m)
self.klasses.append(klass)
print(klass.__name__)
if klass.__name__ == SPECTRALEXT:
inst = klass(input_item, windowsize, output_item + ENSEMBLE + klass.__name__,
output_item + ENSEMBLE + klass.__name__ + '_inv')
else:
inst = klass(input_item, windowsize, output_item + ENSEMBLE + klass.__name__)
self.output_items.append(output_item + ENSEMBLE + klass.__name__)
self.instances.append(inst)
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
binned_indices_list = []
for inst, output, threshold in zip(self.instances, self.output_items, self.thresholds):
logger.info('Execute anomaly scorer ' + str(inst.__class__.__name__) + ' with threshold ' + str(threshold))
tic = time.perf_counter_ns()
df_copy = inst.execute(df_copy)
toc = time.perf_counter_ns()
logger.info('Executed anomaly scorer ' + str(inst.__class__.__name__) + ' in ' +\
str((toc-tic)//1000000) + ' milliseconds')
arr = df_copy[output]
# sort results into bins that depend on the thresholds
# 0 - below 3/4 threshold, 1 - up to the threshold, 2 - crossed the threshold,
# 3 - very high, 4 - extreme
if inst.__class__.__name__ == SPECTRALEXT and isinstance(threshold, int):
# hard coded threshold for inverted values
threshold_ = 5
bins = [threshold * 0.75, threshold, threshold * 1.5, threshold * 2]
binned_indices_list.append(np.searchsorted(bins, arr, side='left'))
if inst.__class__.__name__ == SPECTRALEXT:
bins = [threshold_ * 0.75, threshold_, threshold_ * 1.5, threshold_ * 2]
arr = df_copy[output + '_inv']
binned_indices_list.append(np.searchsorted(bins, arr, side='left'))
binned_indices = np.vstack(binned_indices_list).mean(axis=0)
# should we explicitly drop the columns generated by the ensemble members
#df[self.output_item] = binned_indices
df_copy[self.output_item] = binned_indices
return df_copy
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Spectral anomaly score (z-score)'))
return inputs, outputs
class SpectralAnomalyScore(AnomalyScorer):
'''
An unsupervised anomaly detection function.
    Applies a spectral analysis clustering technique to extract features from time series data and to create z scores.
    Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, output_item):
if isinstance(output_item, list):
super().__init__(input_item, windowsize, output_item)
else:
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'SpectralAnomalyScore'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# Fourier transform:
# frequency, time, spectral density
frequency_temperature, time_series_temperature, spectral_density_temperature = signal.spectrogram(
temperature, fs=self.frame_rate, window='hanning', nperseg=self.windowsize,
noverlap=self.windowoverlap, detrend='l', scaling='spectrum')
            # cut off frequencies too low to fit into the window
frequency_temperatureb = (frequency_temperature > 2 / self.windowsize).astype(int)
frequency_temperature = frequency_temperature * frequency_temperatureb
frequency_temperature[frequency_temperature == 0] = 1 / self.windowsize
signal_energy = np.dot(spectral_density_temperature.T, frequency_temperature)
signal_energy[signal_energy < SmallEnergy] = SmallEnergy
inv_signal_energy = np.divide(np.ones(signal_energy.size), signal_energy)
ets_zscore = abs(sp.stats.zscore(signal_energy)) * Spectral_normalizer
inv_zscore = abs(sp.stats.zscore(inv_signal_energy))
scores[0] = ets_zscore
if len(self.output_items) > 1:
scores[1] = inv_zscore
# 3rd argument to return the raw windowed signal energy
if len(self.output_items) > 2:
scores[2] = signal_energy
# 4th argument to return the modified input argument (for no data)
if len(self.output_items) > 3:
scores[3] = temperature.copy()
logger.debug(
'Spectral z-score max: ' + str(ets_zscore.max()) + ', Spectral inv z-score max: ' + str(
inv_zscore.max()))
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Spectral anomaly score (z-score)'))
return inputs, outputs
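# Illustrative sketch (synthetic data, example parameters) of the spectral scoring idea
# above: compute a spectrogram, sum the energy per window and z-score the energies.
def _demo_spectral_energy_score():
    from scipy import stats
    t = np.linspace(0, 20 * np.pi, 1024)
    data = np.sin(t)
    data[500:520] += np.random.default_rng(0).normal(0, 3, 20)   # noisy burst
    freq, times, sxx = signal.spectrogram(data, fs=1, window='hann', nperseg=12,
                                          noverlap=6, detrend='linear', scaling='spectrum')
    energy = sxx.sum(axis=0)
    print(np.round(abs(stats.zscore(energy)).max(), 2))   # windows around the burst score high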
class SpectralAnomalyScoreExt(SpectralAnomalyScore):
'''
An unsupervised anomaly detection function.
    Applies a spectral analysis clustering technique to extract features from time series data and to create z scores.
    Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
'''
def __init__(self, input_item, windowsize, output_item, inv_zscore, signal_energy=None):
if signal_energy is None:
super().__init__(input_item, windowsize, [output_item, inv_zscore])
else:
super().__init__(input_item, windowsize, [output_item, inv_zscore, signal_energy])
logger.debug(input_item)
self.whoami = 'SpectralAnomalyScoreExt'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=float, description='Spectral anomaly score (z-score)'))
outputs.append(UIFunctionOutSingle(name='inv_zscore', datatype=float,
description='z-score of inverted signal energy - detects unusually low activity'))
        outputs.append(UIFunctionOutSingle(name='signal_energy', datatype=float,
description='signal energy'))
return inputs, outputs
class KMeansAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies a k-means analysis clustering technique to time series data.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly models on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item, expr=None):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'KMeans'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try:
# Chop into overlapping windows
slices = view_as_windows(temperature, self.windowsize, self.step)
if self.windowsize > 1:
n_cluster = 40
else:
n_cluster = 20
n_cluster = np.minimum(n_cluster, slices.shape[0] // 2)
logger.debug(self.whoami + 'params, Clusters: ' + str(n_cluster) + ', Slices: ' + str(slices.shape))
cblofwin = CBLOF(n_clusters=n_cluster, n_jobs=-1)
try:
cblofwin.fit(slices)
except Exception as e:
logger.info('KMeans failed with ' + str(e))
self.trace_append('KMeans failed with' + str(e))
return scores
pred_score = cblofwin.decision_scores_.copy() * KMeans_normalizer
scores[0] = pred_score
logger.debug('KMeans score max: ' + str(pred_score.max()))
except Exception as e:
logger.error(self.whoami + ' failed with ' + str(e))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Anomaly score (kmeans)'))
return inputs, outputs
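# Minimal sketch (synthetic data, example parameters) of the clustering-based scoring used
# above: chop the series into overlapping windows and let CBLOF score each window.
def _demo_cblof_scoring():
    rng = np.random.default_rng(1)
    temperature = rng.normal(0, 1, 300)
    temperature[150:162] += 8.0                      # anomalous stretch
    slices = view_as_windows(temperature, 12, 6)
    clf = CBLOF(n_clusters=8, n_jobs=-1)
    try:
        clf.fit(slices)
        print(np.round(clf.decision_scores_.max(), 2))   # windows covering the stretch score high
    except Exception as exc:                             # CBLOF can fail on degenerate clusterings
        print('CBLOF fit failed:', exc)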
class GeneralizedAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies the Minimum Covariance Determinant (FastMCD) technique to detect outliers.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, [output_item])
logger.debug(input_item)
self.whoami = 'GAM'
self.normalizer = Generalized_normalizer
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices = view_as_windows(temperature, self.windowsize, self.step)
return slices
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
logger.debug(str(temperature.size) + "," + str(self.windowsize))
temperature -= np.mean(temperature.astype(np.float64), axis=0)
mcd = MinCovDet()
# Chop into overlapping windows (default) or run through FFT first
slices = self.feature_extract(temperature)
try:
mcd.fit(slices)
pred_score = mcd.mahalanobis(slices).copy() * self.normalizer
except ValueError as ve:
pred_score = np.zeros(temperature.shape)
logger.info(self.whoami + ", Input: " + str(
self.input_item) + ", WindowSize: " + str(self.windowsize) + ", Output: " + str(
self.output_items[0]) + ", Step: " + str(self.step) + ", InputSize: " + str(
slices.shape) + " failed in the fitting step with \"" + str(ve) + "\" - scoring zero")
except Exception as e:
pred_score = np.zeros(temperature.shape)
logger.error(self.whoami + ", Input: " + str(
self.input_item) + ", WindowSize: " + str(self.windowsize) + ", Output: " + str(
self.output_items[0]) + ", Step: " + str(self.step) + ", InputSize: " + str(
slices.shape) + " failed in the fitting step with " + str(e))
scores[0] = pred_score
logger.debug(self.whoami + ' score max: ' + str(pred_score.max()))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name="output_item", datatype=float, description="Anomaly score (GeneralizedAnomaly)", ))
return inputs, outputs
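# --- Illustrative sketch (not part of the original module) -------------------
# The core of GeneralizedAnomalyScore above: fit a robust covariance estimate
# (FastMCD) on the overlapping windows and use the Mahalanobis distance of each
# window as its anomaly score. Signal and window size are made up for
# illustration only.
def _demo_windowed_mcd_scoring():
    import numpy as np
    from skimage.util import view_as_windows
    from sklearn.covariance import MinCovDet

    rng = np.random.default_rng(0)
    signal = rng.standard_normal(300)
    signal[150:160] += 6.0                                  # injected level shift

    signal = signal - np.mean(signal)                       # center, as score() does
    slices = view_as_windows(signal, 12, 1)

    mcd = MinCovDet()
    try:
        mcd.fit(slices)
        return mcd.mahalanobis(slices)                      # one distance per window
    except ValueError:
        # MinCovDet raises on degenerate/singular window matrices;
        # the production code above then falls back to zero scores
        return np.zeros(slices.shape[0])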
class NoDataAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Uses FastMCD to find gaps in data.
The function moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'NoData'
self.normalizer = 1
logger.debug('NoData')
def prepare_data(self, dfEntity):
logger.debug(self.whoami + ': prepare Data')
# operate on simple timestamp index
if len(dfEntity.index.names) > 1:
index_names = dfEntity.index.names[1:]
dfe = dfEntity.reset_index(index_names)
else:
dfe = dfEntity
# count the timedelta in seconds between two events
logger.debug('type of index[0] is ' + str(type(dfEntity.index[0])))
try:
timeSeq = (dfe.index.values - dfe.index[0].to_datetime64()) / np.timedelta64(1, 's')
except Exception:
try:
time_to_numpy = np.array(dfe.index[0], dtype='datetime64')
                logger.debug('NoData: fallback time index of type ' + str(type(time_to_numpy)) + ', first element ' + str(dfe.index[0][0]))
timeSeq = (time_to_numpy - dfe.index[0][0].to_datetime64()) / np.timedelta64(1, 's')
except Exception:
                logger.debug('NoData: could not compute time differences, using a constant fallback')
timeSeq = 1.0
#dfe = dfEntity.copy()
        # one dimensional time series - named temperature for catchiness
# we look at the gradient of the time series timestamps for anomaly detection
# might throw an exception - we catch it in the super class !!
try:
temperature = np.gradient(timeSeq)
dfe[[self.input_item]] = temperature
except Exception as pe:
logger.info("NoData Gradient failed with " + str(pe))
dfe[[self.input_item]] = 0
temperature = dfe[[self.input_item]].values
temperature[0] = 10 ** 10
temperature = temperature.astype('float64').reshape(-1)
return dfe, temperature
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='No data anomaly score'))
return inputs, outputs
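# --- Illustrative sketch (not part of the original module) -------------------
# NoDataAnomalyScore above scores gaps in the *timestamps* rather than in the
# values: convert the index to seconds since the first event and take the
# gradient, so large gradients mark periods of missing data. The timestamps
# below are made up for illustration only.
def _demo_timestamp_gap_signal():
    import numpy as np
    import pandas as pd

    idx = pd.date_range('2021-01-01', periods=50, freq='5min')
    idx = idx.delete(np.arange(20, 30))                     # simulate a data gap
    time_seq = (idx.values - idx[0].to_datetime64()) / np.timedelta64(1, 's')
    gap_signal = np.gradient(time_seq)                      # ~300 s normally, much larger at the gap
    return gap_signal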
class FFTbasedGeneralizedAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised and robust anomaly detection function.
Extracts temporal features from time series data using Fast Fourier Transforms.
Applies the GeneralizedAnomalyScore to the features to detect outliers.
Moves a sliding window across the data signal and applies the anomaly models to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'FFT'
self.normalizer = FFT_normalizer
logger.debug('FFT')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices_ = view_as_windows(temperature, self.windowsize, self.step)
slicelist = []
for slice in slices_:
slicelist.append(fftpack.rfft(slice))
return np.stack(slicelist, axis=0)
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (FFTbasedGeneralizedAnomalyScore)", ))
return inputs, outputs
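# --- Illustrative sketch (not part of the original module) -------------------
# FFTbasedGeneralizedAnomalyScore above replaces each raw window by its real
# FFT coefficients before handing the window matrix to the FastMCD scorer. A
# standalone version of that feature extraction step; signal and window size
# are made up for illustration only.
def _demo_fft_window_features(windowsize=12):
    import numpy as np
    from scipy import fftpack
    from skimage.util import view_as_windows

    signal = np.sin(np.linspace(0, 8 * np.pi, 200))
    slices = view_as_windows(signal, windowsize, 1)
    features = np.stack([fftpack.rfft(window) for window in slices], axis=0)
    return features                                         # shape (n_windows, windowsize)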
class MatrixProfileAnomalyScore(AnomalyScorer):
"""
An unsupervised anomaly detection function.
Applies matrix profile analysis on time series data.
Moves a sliding window across the data signal to calculate the euclidean distance from one window to all others to build a distance profile.
The window size is typically set to 12 data points.
Try several anomaly models on your data and use the one that fits your data best.
"""
DATAPOINTS_AFTER_LAST_WINDOW = 1e-15
INIT_SCORES = 1e-20
ERROR_SCORES = 1e-16
def __init__(self, input_item, window_size, output_item):
super().__init__(input_item, window_size, [output_item])
logger.debug(f'Input item: {input_item}')
self.whoami = 'MatrixProfile'
def score(self, temperature):
scores = []
for output_item in self.output_items:
scores.append(np.zeros(temperature.shape))
try: # calculate scores
matrix_profile = stumpy.aamp(temperature, m=self.windowsize)[:, 0]
# fill in a small value for newer data points outside the last possible window
fillers = np.array([self.DATAPOINTS_AFTER_LAST_WINDOW] * (self.windowsize - 1))
matrix_profile = np.append(matrix_profile, fillers)
except Exception as er:
logger.warning(f' Error in calculating Matrix Profile Scores. {er}')
matrix_profile = np.array([self.ERROR_SCORES] * temperature.shape[0])
scores[0] = matrix_profile
logger.debug('Matrix Profile score max: ' + str(matrix_profile.max()))
return scores
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = [UISingleItem(name="input_item", datatype=float, description="Time series data item to analyze", ),
UISingle(name="window_size", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.")]
# define arguments that behave as function outputs
outputs = [UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (MatrixProfileAnomalyScore)", )]
return inputs, outputs
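# --- Illustrative sketch (not part of the original module) -------------------
# The essence of MatrixProfileAnomalyScore above: stumpy.aamp computes the
# non-normalized matrix profile, i.e. for every window the distance to its
# nearest neighbouring window; discords (anomalies) show up as large profile
# values. The last windowsize-1 points have no full window and are padded with
# a tiny filler, as in score() above. Signal and window size are made up.
def _demo_matrix_profile(windowsize=12):
    import numpy as np
    import stumpy

    rng = np.random.default_rng(1)
    signal = np.sin(np.linspace(0, 10 * np.pi, 300)) + 0.05 * rng.standard_normal(300)
    signal[150:162] = 0.0                                   # injected discord

    profile = stumpy.aamp(signal, m=windowsize)[:, 0]
    profile = np.append(profile, np.full(windowsize - 1, 1e-15))
    return profile                                          # same length as the input signal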
class SaliencybasedGeneralizedAnomalyScore(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Based on salient region detection models,
    it uses fast fourier transform to reconstruct a signal using the salient features of the signal.
It applies GeneralizedAnomalyScore to the reconstructed signal.
The function moves a sliding window across the data signal and applies its analysis to each window.
The window size is typically set to 12 data points.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'Saliency'
self.saliency = Saliency(windowsize, 0, 0)
self.normalizer = Saliency_normalizer
logger.debug('Saliency')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices = view_as_windows(temperature, self.windowsize, self.step)
return slices
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze"))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.", ))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (SaliencybasedGeneralizedAnomalyScore)", ))
return (inputs, outputs)
#######################################################################################
# Anomaly detectors with scaling
#######################################################################################
class KMeansAnomalyScoreV2(KMeansAnomalyScore):
def __init__(self, input_item, windowsize, normalize, output_item, expr=None):
super().__init__(input_item, windowsize, output_item)
logger.debug(input_item)
self.normalize = normalize
self.whoami = 'KMeansV2'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=float, description='Data item to analyze'))
inputs.append(UISingle(name='windowsize', datatype=int,
description='Size of each sliding window in data points. Typically set to 12.'))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Anomaly score (kmeans)'))
return (inputs, outputs)
class GeneralizedAnomalyScoreV2(GeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Applies the Minimum Covariance Determinant (FastMCD) technique to detect outliers.
Moves a sliding window across the data signal and applies the anomaly model to each window.
The window size is typically set to 12 data points.
    The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item, expr=None):
super().__init__(input_item, windowsize, output_item)
logger.debug(input_item)
# do not run score and call transform instead of predict
self.normalize = normalize
self.whoami = 'GAMV2'
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name="output_item", datatype=float, description="Anomaly score (GeneralizedAnomaly)", ))
return inputs, outputs
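# --- Illustrative sketch (not part of the original module) -------------------
# The V2 scorers only set self.normalize; where the scaling is applied is
# assumed to live in the AnomalyScorer base class. The sketch below shows the
# general idea of "learn and apply a standard scaler prior to computing the
# anomaly score": standardize the window matrix before fitting FastMCD.
# Whether the production code scales the windows or the raw series is an
# assumption here.
def _demo_scaled_mcd_scoring(windowsize=12):
    import numpy as np
    from skimage.util import view_as_windows
    from sklearn.covariance import MinCovDet
    from sklearn.preprocessing import StandardScaler

    rng = np.random.default_rng(3)
    signal = 50.0 + 10.0 * rng.standard_normal(300)         # arbitrary scale/offset
    slices = view_as_windows(signal, windowsize, 1)

    scaled = StandardScaler().fit_transform(slices)         # learn + apply the scaler
    mcd = MinCovDet().fit(scaled)
    return mcd.mahalanobis(scaled)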
class FFTbasedGeneralizedAnomalyScoreV2(GeneralizedAnomalyScoreV2):
"""
An unsupervised and robust anomaly detection function.
Extracts temporal features from time series data using Fast Fourier Transforms.
Applies the GeneralizedAnomalyScore to the features to detect outliers.
Moves a sliding window across the data signal and applies the anomaly models to each window.
The window size is typically set to 12 data points.
    The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item):
super().__init__(input_item, windowsize, normalize, output_item)
self.normalize = normalize
self.whoami = 'FFTV2'
self.normalizer = FFT_normalizer
logger.debug('FFT')
def feature_extract(self, temperature):
logger.debug(self.whoami + ': feature extract')
slices_ = view_as_windows(temperature, self.windowsize, self.step)
slicelist = []
for slice in slices_:
slicelist.append(fftpack.rfft(slice))
return np.stack(slicelist, axis=0)
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze", ))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12."))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (FFTbasedGeneralizedAnomalyScore)", ))
return inputs, outputs
class SaliencybasedGeneralizedAnomalyScoreV2(SaliencybasedGeneralizedAnomalyScore):
"""
An unsupervised anomaly detection function.
Based on salient region detection models,
    it uses fast fourier transform to reconstruct a signal using the salient features of the signal.
It applies GeneralizedAnomalyScore to the reconstructed signal.
The function moves a sliding window across the data signal and applies its analysis to each window.
The window size is typically set to 12 data points.
    The normalize switch allows learning and applying a standard scaler prior to computing the anomaly score.
Try several anomaly detectors on your data and use the one that fits your data best.
"""
def __init__(self, input_item, windowsize, normalize, output_item):
super().__init__(input_item, windowsize, output_item)
self.whoami = 'SaliencyV2'
self.normalize = normalize
logger.debug('SaliencyV2')
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze"))
inputs.append(UISingle(name="windowsize", datatype=int,
description="Size of each sliding window in data points. Typically set to 12.", ))
inputs.append(UISingle(name='normalize', datatype=bool, description='Flag for normalizing data.'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=float,
description="Anomaly score (SaliencybasedGeneralizedAnomalyScore)", ))
return inputs, outputs
KMeansAnomalyScorev2 = KMeansAnomalyScoreV2
FFTbasedGeneralizedAnomalyScorev2 = FFTbasedGeneralizedAnomalyScoreV2
SaliencybasedGeneralizedAnomalyScorev2 = SaliencybasedGeneralizedAnomalyScoreV2
GeneralizedAnomalyScorev2 = GeneralizedAnomalyScoreV2
#######################################################################################
# Base class to handle models
#######################################################################################
class SupervisedLearningTransformer(BaseTransformer):
name = 'SupervisedLearningTransformer'
"""
Base class for anomaly scorers that can be trained with historic data in a notebook
and automatically store a trained model in the tenant database
Inferencing is run in the pipeline
"""
def __init__(self, features, targets):
super().__init__()
logging.debug("__init__" + self.name)
# do NOT automatically train if no model is found (subclasses)
self.auto_train = False
self.delete_model = False
self.features = features
self.targets = targets
parms = []
if features is not None:
parms.extend(features)
if targets is not None:
parms.extend(targets)
parms = '.'.join(parms)
logging.debug("__init__ done with parameters: " + parms)
'''
Generate unique model name from entity, optionally features and target for consistency checks
'''
def get_model_name(self, prefix='model', features=None, targets=None, suffix=None):
name = []
if prefix is not None:
name.append(prefix)
name.extend([self._entity_type.name, self.name])
if features is not None:
name.extend(features)
if targets is not None:
name.extend(targets)
if suffix is not None:
name.append(suffix)
name = '.'.join(name)
return name
def load_model(self, suffix=None):
model_name = self.get_model_name(targets=self.targets, suffix=suffix)
my_model = None
try:
my_model = self._entity_type.db.model_store.retrieve_model(model_name)
logger.info('load model %s' % str(my_model))
except Exception as e:
logger.error('Model retrieval failed with ' + str(e))
pass
# ditch old model
version = 1
if self.delete_model:
if my_model is not None:
if hasattr(my_model, 'version'):
version = my_model.version + 1
logger.debug('Deleting robust model ' + str(version-1) + ' for entity: ' + str(suffix))
my_model = None
return model_name, my_model, version
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df # no copy
# check data type
#if df[self.input_item].dtype != np.float64:
for feature in self.features:
if not pd.api.types.is_numeric_dtype(df_copy[feature].dtype):
logger.error('Regression on non-numeric feature:' + str(feature))
return (df_copy)
# delegate to _calc
logger.debug('Execute ' + self.whoami + ' enter per entity execution')
# group over entities
group_base = [pd.Grouper(axis=0, level=0)]
df_copy = df_copy.groupby(group_base).apply(self._calc)
logger.debug('Scoring done')
return df_copy
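# --- Note (not part of the original module) -----------------------------------
# get_model_name() above builds a dot-joined identifier of the form
#   <prefix>.<entity type name>.<function name>[.<features>][.<targets>][.<suffix>]
# and load_model() calls it with the targets and the entity id as suffix. The
# names used below are made-up examples to illustrate the resulting format.
def _demo_model_name_format():
    parts = ['model', 'MyEntityType', 'RobustThreshold', 'temperature_outlier', 'Pump_001']
    return '.'.join(parts)   # 'model.MyEntityType.RobustThreshold.temperature_outlier.Pump_001'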
#######################################################################################
# Outlier removal in pipeline
#######################################################################################
class LocalOutlierFactor:
def __init__(self):
        # this wrapper shadows sklearn's LocalOutlierFactor, so import the
        # sklearn estimator under an alias to avoid recursive self-instantiation
        from sklearn.neighbors import LocalOutlierFactor as _SklearnLOF
        self.lof = _SklearnLOF()  # **kwargs)
self.version = 1
def fit(self, X):
self.lof.fit(X.reshape(-1,1))
def predict(self, X, threshold):
#return (X >= self.MinMax[0]) & (X <= self.MinMax[1])
return self.lof.negative_outlier_factor_ < threshold
class KDEMaxMin:
def __init__(self, version=1):
self.version = version
self.kde = KernelDensity(kernel='gaussian')
self.Min = None
self.Max = None
def fit(self, X, alpha):
self.kde.fit(X.reshape(-1,1))
kde_X = self.kde.score_samples(X.reshape(-1,1))
# find outliers of the kde score
tau_kde = sp.stats.mstats.mquantiles(kde_X, 1. - alpha) # alpha = 0.995
# determine outliers
X_outliers = X[np.argwhere(kde_X < tau_kde).flatten()]
X_valid = X[np.argwhere(kde_X >= tau_kde).flatten()]
# determine max of all sample that are not outliers
self.Min = X_valid.min()
self.Max = X_valid.max()
if len(X_outliers) > 0:
X_min = X_outliers[X_outliers < self.Min]
X_max = X_outliers[X_outliers > self.Max]
if len(X_min) > 0:
self.Min = max(X_min.max(), self.Min)
if len(X_max) > 0:
self.Max = min(X_max.min(), self.Max)
# self.Min = max(X_outliers[X_outliers < self.Min].max(), self.Min)
# self.Max = min(X_outliers[X_outliers > self.Max].min(), self.Max)
logger.info('KDEMaxMin - Min: ' + str(self.Min) + ', ' + str(self.Max))
return kde_X
def predict(self, X, threshold=None):
return (X >= self.Min) & (X <= self.Max)
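# --- Illustrative sketch (not part of the original module) -------------------
# How KDEMaxMin above can be used on its own: fit() estimates a kernel density,
# takes the (1 - alpha) quantile of the density scores as the outlier cut-off
# and derives robust Min/Max bounds from the remaining samples; predict() then
# flags values inside those bounds. Data and alpha are made up for illustration.
def _demo_kde_max_min():
    import numpy as np

    rng = np.random.default_rng(7)
    values = np.concatenate([rng.normal(20.0, 1.0, 500), [5.0, 60.0]])  # two clear outliers

    model = KDEMaxMin(version=1)
    model.fit(values, alpha=0.95)
    within_bounds = model.predict(values)                   # boolean mask, False for outliers
    return model.Min, model.Max, within_bounds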
class RobustThreshold(SupervisedLearningTransformer):
def __init__(self, input_item, threshold, output_item):
super().__init__(features=[input_item], targets=[output_item])
self.input_item = input_item
self.threshold = threshold
self.output_item = output_item
self.auto_train = True
self.Min = dict()
self.Max = dict()
self.whoami = 'RobustThreshold'
logger.info(self.whoami + ' from ' + self.input_item + ' quantile threshold ' + str(self.threshold) +
' exceeding boolean ' + self.output_item)
def execute(self, df):
# set output columns to zero
logger.debug('Called ' + self.whoami + ' with columns: ' + str(df.columns))
df[self.output_item] = 0
return super().execute(df)
def _calc(self, df):
# per entity - copy for later inplace operations
db = self._entity_type.db
entity = df.index.levels[0][0]
model_name, robust_model, version = self.load_model(suffix=entity)
feature = df[self.input_item].values
if robust_model is None and self.auto_train:
robust_model = KDEMaxMin(version=version)
try:
robust_model.fit(feature, self.threshold)
db.model_store.store_model(model_name, robust_model)
except Exception as e:
logger.error('Model store failed with ' + str(e))
robust_model = None
if robust_model is not None:
self.Min[entity] = robust_model.Min
self.Max[entity] = robust_model.Max
df[self.output_item] = robust_model.predict(feature, self.threshold)
else:
df[self.output_item] = 0
return df.droplevel(0)
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name="input_item", datatype=float, description="Data item to analyze"))
inputs.append(UISingle(name="threshold", datatype=int,
description="Threshold to determine outliers by quantile. Typically set to 0.95", ))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name="output_item", datatype=bool,
description="Boolean outlier condition"))
return (inputs, outputs)
#######################################################################################
# Regressors
#######################################################################################
class BayesRidgeRegressor(BaseEstimatorFunction):
"""
Linear regressor based on a probabilistic model as provided by sklearn
"""
eval_metric = staticmethod(metrics.r2_score)
# class variables
train_if_no_model = True
num_rounds_per_estimator = 3
def BRidgePipeline(self):
steps = [('scaler', StandardScaler()), ('bridge', linear_model.BayesianRidge(compute_score=True))]
return Pipeline(steps)
def set_estimators(self):
params = {}
self.estimators['bayesianridge'] = (self.BRidgePipeline, params)
        logger.info('Bayesian Ridge Regressor starts searching for best model')
def __init__(self, features, targets, predictions=None, deviations=None):
super().__init__(features=features, targets=targets, predictions=predictions, stddev=True, keep_current_models=True)
if deviations is not None:
self.pred_stddev = deviations
self.experiments_per_execution = 1
self.auto_train = True
self.correlation_threshold = 0
self.stop_auto_improve_at = -2
self.whoami = 'BayesianRidgeRegressor'
def execute(self, df):
logger.debug('Execute ' + self.whoami)
df_copy = df.copy()
# Create missing columns before doing group-apply
missing_cols = [x for x in self.predictions + self.pred_stddev if x not in df_copy.columns]
for m in missing_cols:
df_copy[m] = None
# check data type
#if df[self.input_item].dtype != np.float64:
for feature in self.features:
if not | pd.api.types.is_numeric_dtype(df_copy[feature].dtype) | pandas.api.types.is_numeric_dtype |
# -*- coding: utf-8 -*-
"""
Reading data for WB, PRO,
for kennisimpulse project
to read data from province, water companies, and any other sources
Created on Sun Jul 26 21:55:57 2020
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
from hgc import ner
from hgc import io
import tests
# import xlsxwriter
def test_province():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+'/provincie_data_long_preprocessed.csv'
df_temp = pd.read_csv(WD, encoding='ISO-8859-1', header=None)
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 25].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'stacked',
'shape': 'stacked',
'slice_header': [1, slice(1, None)],
'slice_data': [slice(1, n_row), slice(1, None)],
'map_header': {
**io.default_map_header(),
'MeetpuntId': 'LocationID',
'parameter':'Feature',
'eenheid': 'Unit',
'waarde': 'Value',
'Opgegeven bemonstering datum': 'Datetime',
'Monsternummer': 'SampleID', # "SampleID" already exists as header, but contains wrong date. Use "Sample number" as "SampleID"
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+r'/provincie_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_prov')
df2.to_excel(writer, sheet_name='df_prov')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
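# --- Note (not part of the original file) --------------------------------------
# The test_* functions below repeat the pattern shown in test_province():
#   1. read the raw file and run ner.generate_feature_map() / generate_unit_map()
#      on the feature and unit columns,
#   2. build the io.import_file() arguments with slice_header/slice_data and a
#      map_header that renames the source columns to
#      LocationID/Feature/Unit/Value/Datetime/SampleID,
#   3. convert with io.import_file(**dct_arguments) and io.stack_to_hgc(),
#   4. write the results plus the mapped/unmapped overview to Excel.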
def test_KIWKZUID():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
df_temp = pd.read_csv(WD, header=None, encoding='ISO-8859-1')
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
df2.to_excel(writer, sheet_name='KIWK_Zuid')
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK_Zuid')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKVenloschol():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
# WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_preprocessed.xlsx'
    df_temp = pd.read_excel(WD, header=None)  # read_excel has no encoding argument in current pandas
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l atrazine-D5':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Venloschol_processed.xlsx') as writer:
df2_hgc.to_excel(writer, sheet_name='hgc_KIWK Venloschol')
df2.to_excel(writer, sheet_name='KIWK Venloschol')
df_map.to_excel(writer, sheet_name='mapAndUnmap')
def test_KIWKRoerdalslenk():
    # WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
    # WD = r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/Opkomende stoffen KIWK Zuid_preprocessed.csv'
    WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_preprocessed.xlsx'
    df_temp = pd.read_excel(WD, header=None)  # read_excel has no encoding argument in current pandas
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 20].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 21].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
    if feature_unmapped:
        df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
    if unit_unmapped:
        df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'Export KoW 2.0',
'shape': 'stacked',
'slice_header': [1, slice(1, 24)],
'slice_data': [slice(1, n_row), slice(1, 24)],
'map_header': {
**io.default_map_header(),
'Monsterpunt': 'LocationID',
'Parameter omschrijving':'Feature',
'Eenheid': 'Unit',
'Gerapporteerde waarde': 'Value', # Gerapporteerde waarde, right?!
'Monstername datum': 'Datetime',
'Analyse': 'SampleID', # Analyse !?
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'µg/l Hxdcn-d34':'µg/l'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
# with pd.ExcelWriter(r'D:/DBOX/Dropbox/008KWR/0081Projects/kennisimpulse/KIWK_Zuid_processed.xlsx') as writer:
with | pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse/Opkomende stoffen KIWK Roerdalslenk_processed.xlsx') | pandas.ExcelWriter |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 15:00:57 2019
@author: <NAME>
Input file: list of quantification tables
Output files: quantification_stats_*.tsv, quantification_results_*.tsv (HTSeq option) or
Description: Used to merge quantification results from all samples
"""
import argparse
import pandas as pd
# function to merge HTSeq quantification results
def collect_quantification_data_HTseq(input_files, profile, gene_attribute):
# initiate merged quantification data frame
quant_merged_table = pd.DataFrame()
for input_file in input_files: #iterate over sample results
# read quantification data
quant_table = | pd.read_csv(input_file,sep='\t',header=0) | pandas.read_csv |
'''
Created on 30.5.2017
@author: Markus.Walden
- https://developers.arcgis.com/authentication/accessing-arcgis-online-services/
'''
import requests
import pandas as pd
import numpy as np
def main():
return None
def getStockData():
# df = df.sample(n = 20) # , frac, replace, weights, random_state, axis
# result.fillna(method='ffill')
df_stock = pd.read_csv('./data/stockFull.csv', sep = ';', encoding='latin-1', decimal=",")
df_symbol = | pd.read_csv('./data/symbolLatLong.csv', sep = ';', encoding='latin-1', decimal=",", index_col = 'symbol') | pandas.read_csv |