| prompt (string, lengths 19 – 1.03M) | completion (string, lengths 4 – 2.12k) | api (string, lengths 8 – 90) |
|---|---|---|
# -*- coding: utf-8 -*-
import copy
import os
import shutil
from builtins import range
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
import featuretools as ft
from featuretools import variable_types
from featuretools.entityset import EntitySet, Relationship
from featuretools.tests import integration_data
@pytest.fixture()
def entityset():
return make_ecommerce_entityset()
def test_operations_invalidate_metadata(entityset):
new_es = ft.EntitySet(id="test")
# test metadata gets created on access
assert new_es._metadata is None
assert new_es.metadata is not None # generated after access
assert new_es._metadata is not None
new_es.entity_from_dataframe("customers",
entityset["customers"].df,
index=entityset["customers"].index)
new_es.entity_from_dataframe("sessions",
entityset["sessions"].df,
index=entityset["sessions"].index)
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
r = ft.Relationship(new_es["customers"]["id"],
new_es["sessions"]["customer_id"])
new_es = new_es.add_relationship(r)
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es = new_es.normalize_entity("customers", "cohort", "cohort")
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es.add_last_time_indexes()
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
new_es.add_interesting_values()
assert new_es._metadata is None
assert new_es.metadata is not None
assert new_es._metadata is not None
def test_reset_metadata(entityset):
assert entityset.metadata is not None
assert entityset._metadata is not None
entityset.reset_metadata()
assert entityset._metadata is None
def test_cannot_readd_relationships_that_already_exists(entityset):
before_len = len(entityset.relationships)
entityset.add_relationship(entityset.relationships[0])
after_len = len(entityset.relationships)
assert before_len == after_len
def test_add_relationships_convert_type(entityset):
for r in entityset.relationships:
parent_e = entityset[r.parent_entity.id]
child_e = entityset[r.child_entity.id]
assert type(r.parent_variable) == variable_types.Index
assert type(r.child_variable) == variable_types.Id
assert parent_e.df[r.parent_variable.id].dtype == child_e.df[r.child_variable.id].dtype
def test_add_relationship_errors_on_dtype_mismatch(entityset):
log_2_df = entityset['log'].df.copy()
log_variable_types = {
'id': variable_types.Categorical,
'session_id': variable_types.Id,
'product_id': variable_types.Id,
'datetime': variable_types.Datetime,
'value': variable_types.Numeric,
'value_2': variable_types.Numeric,
'latlong': variable_types.LatLong,
'latlong2': variable_types.LatLong,
'value_many_nans': variable_types.Numeric,
'priority_level': variable_types.Ordinal,
'purchased': variable_types.Boolean,
'comments': variable_types.Text
}
entityset.entity_from_dataframe(entity_id='log2',
dataframe=log_2_df,
index='id',
variable_types=log_variable_types,
time_index='datetime',
encoding='utf-8')
error_text = u'Unable to add relationship because id in customers is Pandas dtype category and session_id in log2 is Pandas dtype int64.'
with pytest.raises(ValueError, match=error_text):
mismatch = Relationship(entityset[u'customers']['id'], entityset['log2']['session_id'])
entityset.add_relationship(mismatch)
def test_query_by_id(entityset):
df = entityset['log'].query_by_values(instance_vals=[0])
assert df['id'].values[0] == 0
def test_query_by_id_with_time(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2, 3, 4],
time_last=datetime(2011, 4, 9, 10, 30, 2 * 6))
assert df['id'].get_values().tolist() == [0, 1, 2]
def test_get_forward_entities_deep(entityset):
entities = entityset.get_forward_entities('log', 'deep')
assert entities == set(['sessions', 'customers', 'products', u'régions', 'cohorts'])
def test_query_by_variable_with_time(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0))
true_values = [
i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
assert df['id'].get_values().tolist() == list(range(10))
assert df['value'].get_values().tolist() == true_values
def test_query_by_variable_with_training_window(entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0),
training_window='15m')
assert df['id'].get_values().tolist() == [9]
assert df['value'].get_values().tolist() == [0]
def test_query_by_indexed_variable(entityset):
df = entityset['log'].query_by_values(
instance_vals=['taco clock'],
variable_id='product_id')
assert df['id'].get_values().tolist() == [15, 16]
def test_check_variables_and_dataframe():
# matches
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, index='id',
variable_types=vtypes)
assert entityset.entity_dict['test_entity'].variable_types['category'] == variable_types.Categorical
def test_make_index_variable_ordering():
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
assert entityset.entity_dict['test_entity'].df.columns[0] == 'id1'
def test_extra_variable_type():
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical,
'category2': variable_types.Categorical}
error_text = "Variable ID category2 not in DataFrame"
with pytest.raises(LookupError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
def test_add_parent_not_index_varible(entityset):
error_text = "Parent variable.*is not the index of entity Entity.*"
with pytest.raises(AttributeError, match=error_text):
entityset.add_relationship(Relationship(entityset[u'régions']['language'],
entityset['customers'][u'région_id']))
def test_unknown_index():
# more variables
df = pd.DataFrame({'category': ['a', 'b', 'a']})
vtypes = {'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].df['id'].tolist() == list(range(3))
def test_doesnt_remake_index():
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
error_text = "Cannot make index: index variable already present"
with pytest.raises(RuntimeError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
make_index=True,
dataframe=df)
def test_bad_time_index_variable():
df = pd.DataFrame({'category': ['a', 'b', 'a']})
error_text = "Time index not found in dataframe"
with pytest.raises(LookupError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index="id",
dataframe=df,
time_index='time')
def test_converts_variable_types_on_init():
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'category_int': [1, 2, 3],
'ints': ['1', '2', '3'],
'floats': ['1', '2', '3.0']})
df["category_int"] = df["category_int"].astype("category")
vtypes = {'id': variable_types.Categorical,
'ints': variable_types.Numeric,
'floats': variable_types.Numeric}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
entity_df = entityset['test_entity'].df
assert entity_df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
assert entity_df['floats'].dtype.name in variable_types.PandasTypes._pandas_numerics
# this is inferred from the pandas dtype
e = entityset["test_entity"]
assert isinstance(e['category_int'], variable_types.Categorical)
def test_converts_variable_type_after_init():
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'ints': ['1', '2', '1']})
df["category"] = df["category"].astype("category")
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
dataframe=df)
e = entityset['test_entity']
df = entityset['test_entity'].df
e.convert_variable_type('ints', variable_types.Numeric)
assert isinstance(e['ints'], variable_types.Numeric)
assert df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
e.convert_variable_type('ints', variable_types.Categorical)
assert isinstance(e['ints'], variable_types.Categorical)
e.convert_variable_type('ints', variable_types.Ordinal)
assert isinstance(e['ints'], variable_types.Ordinal)
e.convert_variable_type('ints', variable_types.Boolean,
true_val=1, false_val=2)
assert isinstance(e['ints'], variable_types.Boolean)
assert df['ints'].dtype.name == 'bool'
def test_converts_datetime():
# string converts to datetime correctly
# This test fails without defining vtypes. EntitySet
# infers the time column should be a numeric type
times = pd.date_range('1/1/2011', periods=3, freq='H')
time_strs = times.strftime('%Y-%m-%d')
df = pd.DataFrame({'id': [0, 1, 2], 'time': time_strs})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
time_index="time", variable_types=vtypes,
dataframe=df)
pd_col = entityset['test_entity'].df['time']
# assert type(entityset['test_entity']['time']) == variable_types.Datetime
assert type(pd_col[0]) == pd.Timestamp
def test_handles_datetime_format():
# check if we load according to the format string
# pass in an ambiguous date
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
time_strs = [actual.strftime(datetime_format)] * 3
df = pd.DataFrame(
{'id': [0, 1, 2], 'time_format': time_strs, 'time_no_format': time_strs})
vtypes = {'id': variable_types.Categorical,
'time_format': (variable_types.Datetime, {"format": datetime_format}),
'time_no_format': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
col_format = entityset['test_entity'].df['time_format']
col_no_format = entityset['test_entity'].df['time_no_format']
# without formatting pandas gets it wrong
assert (col_no_format != actual).all()
# with formatting we correctly get jan2
assert (col_format == actual).all()
def test_handles_datetime_mismatch():
# can't convert arbitrary strings
df = pd.DataFrame({'id': [0, 1, 2], 'time': ['a', 'b', 'tomorrow']})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
error_text = "Given date string not likely a datetime."
with pytest.raises(ValueError, match=error_text):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, 'id',
time_index='time', variable_types=vtypes)
def test_entity_init(entityset):
# Note: to convert the time column directly either the variable type
# or convert_date_columns must be specified
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6]})
vtypes = {'time': variable_types.Datetime}
entityset.entity_from_dataframe('test_entity', df, index='id',
time_index='time', variable_types=vtypes)
assert entityset['test_entity'].df.shape == df.shape
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].time_index == 'time'
assert set([v.id for v in entityset['test_entity'].variables]) == set(df.columns)
assert entityset['test_entity'].df['time'].dtype == df['time'].dtype
assert set(entityset['test_entity'].df['id']) == set(df['id'])
def test_nonstr_column_names():
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 3: ['a', 'b', 'c']})
es = ft.EntitySet(id='Failure')
error_text = "All column names must be strings.*"
with pytest.raises(ValueError, match=error_text) as excinfo:
es.entity_from_dataframe(entity_id='str_cols',
dataframe=df,
index='index')
assert 'All column names must be strings (Column 3 is not a string)' in str(excinfo)
def test_sort_time_id():
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s")[::-1]})
es = EntitySet("test", entities={"t": (
transactions_df, "id", "transaction_time")})
times = es["t"].df.transaction_time.tolist()
assert times == sorted(transactions_df.transaction_time.tolist())
def test_already_sorted_parameter():
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": [datetime(2014, 4, 6),
datetime(
2012, 4, 8),
datetime(
2012, 4, 8),
datetime(
2013, 4, 8),
datetime(
2015, 4, 8),
datetime(2016, 4, 9)]})
es = EntitySet(id='test')
es.entity_from_dataframe('t',
transactions_df,
index='id',
time_index="transaction_time",
already_sorted=True)
times = es["t"].df.transaction_time.tolist()
assert times == transactions_df.transaction_time.tolist()
def test_concat_entitysets(entityset):
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
entityset.add_last_time_indexes()
assert entityset.__eq__(entityset)
entityset_1 = copy.deepcopy(entityset)
entityset_2 = copy.deepcopy(entityset)
emap = {
'log': [list(range(10)) + [14, 15, 16], list(range(10, 14)) + [15, 16]],
'sessions': [[0, 1, 2, 5], [1, 3, 4, 5]],
'customers': [[0, 2], [1, 2]],
'test_entity': [[0, 1], [0, 2]],
}
assert entityset.__eq__(entityset_1, deep=True)
assert entityset.__eq__(entityset_2, deep=True)
for i, es in enumerate([entityset_1, entityset_2]):
for entity, rows in emap.items():
df = es[entity].df
es[entity].update_data(df=df.loc[rows[i]])
assert 10 not in entityset_1['log'].last_time_index.index
assert 10 in entityset_2['log'].last_time_index.index
assert 9 in entityset_1['log'].last_time_index.index
assert 9 not in entityset_2['log'].last_time_index.index
assert not entityset.__eq__(entityset_1, deep=True)
assert not entityset.__eq__(entityset_2, deep=True)
# make sure internal indexes work before concat
regions = entityset_1['customers'].query_by_values(['United States'], variable_id=u'région_id')
assert regions.index.isin(entityset_1['customers'].df.index).all()
assert entityset_1.__eq__(entityset_2)
assert not entityset_1.__eq__(entityset_2, deep=True)
old_entityset_1 = copy.deepcopy(entityset_1)
old_entityset_2 = copy.deepcopy(entityset_2)
entityset_3 = entityset_1.concat(entityset_2)
assert old_entityset_1.__eq__(entityset_1, deep=True)
assert old_entityset_2.__eq__(entityset_2, deep=True)
assert entityset_3.__eq__(entityset, deep=True)
for entity in entityset.entities:
df = entityset[entity.id].df.sort_index()
df_3 = entityset_3[entity.id].df.sort_index()
for column in df:
for x, y in zip(df[column], df_3[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
orig_lti = entityset[entity.id].last_time_index.sort_index()
new_lti = entityset_3[entity.id].last_time_index.sort_index()
for x, y in zip(orig_lti, new_lti):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
entityset_1['stores'].last_time_index = None
entityset_1['test_entity'].last_time_index = None
entityset_2['test_entity'].last_time_index = None
entityset_4 = entityset_1.concat(entityset_2)
assert not entityset_4.__eq__(entityset, deep=True)
for entity in entityset.entities:
df = entityset[entity.id].df.sort_index()
df_4 = entityset_4[entity.id].df.sort_index()
for column in df:
for x, y in zip(df[column], df_4[column]):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
if entity.id != 'test_entity':
orig_lti = entityset[entity.id].last_time_index.sort_index()
new_lti = entityset_4[entity.id].last_time_index.sort_index()
for x, y in zip(orig_lti, new_lti):
assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pickle
import streamlit as st
import pandas as pd
import numpy as np
sim = pickle.load(open(r"user_sim_df", 'rb'))
user_sim_df=pd.DataFrame(sim)
alist=pickle.load(open(r"anime", 'rb'))
anime=pd.DataFrame(alist)
pv= pickle.load(open(r"pivot", 'rb'))
pivot = pd.DataFrame(pv)
import pandas as pd
from collections import deque, namedtuple
class PositionSummary(object):
"""
Takes the trade history for a user's watchlist from the database and its
ticker. Then applies the FIFO accounting methodology to calculate the
overall position's status, i.e. final open lots, average cost and a breakdown
of the open lots.
This is a queue data structure.
"""
def __init__(self, trade_history):
self.trade_history = trade_history
self.average_cost = None
self.open_lots = None
self.ticker = self.set_ticker()
self.buy_quantities = deque([])
self.buy_prices = deque([])
self.buy_dates = deque([])
self.sell_quantities = deque([])
self.sell_prices = deque([])
self.sell_dates = deque([])
self.open_direction = None
self.breakdown = []
self.net_position = 0
self._apply_fifo()
def __repr__(self):
return (f"<Ticker: {self.ticker}, Quantity: {self.net_position}>")
def set_ticker(self):
tickers = set([i[0] for i in self.trade_history])
if len(tickers) == 1:
return self.trade_history[0][0]
else:
raise ValueError("The Trade History for this security contains multiple tickers")
def total_open_lots(self):
""" returns the sum of the positions open lots"""
if self.open_direction == "long":
return sum(self.buy_quantities)
elif self.open_direction == "short":
return sum(self.sell_quantities)
else:
return None
def total_market_value(self):
"""Returns the position's market value"""
total = None
if self.buy_quantities and self.open_direction == "long":
zipped = zip(self.buy_quantities, self.buy_prices)
total = (quantity*price for quantity, price in zipped)
elif self.sell_quantities and self.open_direction == "short":
zipped = zip(self.sell_quantities, self.sell_prices)
total = (quantity*price for quantity, price in zipped)
return sum(total) if total is not None else None
def get_average_cost(self):
"""Returns the weighted average cost of the positions open lots."""
open_lots = self.total_open_lots()
if open_lots == 0 or not open_lots:
return 0
return abs(self.total_market_value()/self.total_open_lots())
def remove_trade(self, direction):
if direction == "buy":
popped_quantity = self.buy_quantities.popleft()
self.buy_prices.popleft()
self.buy_dates.popleft()
elif direction == "sell":
popped_quantity = self.sell_quantities.popleft()
self.sell_prices.popleft()
self.sell_dates.popleft()
else:
raise ValueError("Unknown trade direction: expected 'buy' or 'sell'")
return popped_quantity
def _collapse_trade(self):
if self.sell_quantities:
if self.sell_quantities[0] >= 0:
self.remove_trade("sell")
if self.buy_quantities:
if self.buy_quantities[0] <= 0:
self.remove_trade("buy")
def get_summary(self):
"""
Returns a named tuple of the ticker, net position and the average
price of the open lots
"""
Summary = namedtuple("Summary",
["ticker", "quantity", "average_price"])
ticker = self.ticker
quantity = self.net_position
average_price = round(self.average_cost, 4)
return Summary(ticker, quantity, average_price)
def add(self, side, units, price, date):
if side == "buy":
self.buy_quantities.append(units)
self.buy_prices.append(price)
self.buy_dates.append(date)
elif side == "sell":
self.sell_quantities.append(units)
self.sell_prices.append(price)
self.sell_dates.append(date)
def _set_direction(self):
"""
Checks if there has been a reversal in the users overall
trade direction and sets that direction accordingly.
"""
if self.open_direction == "short" and self.net_position > 0:
self.open_direction = "long"
elif self.open_direction == "long" and self.net_position < 0:
self.open_direction = "short"
def set_initial_trade(self):
units = self.trade_history[0].quantity
price = self.trade_history[0].price
date = self.trade_history[0].date
if units >= 0:
self.open_direction = "long"
self.add("buy", units, price, date)
else:
self.open_direction = "short"
self.add("sell", units, price, date)
self.average_cost = self.get_average_cost()
self.net_position = self.total_open_lots()
self.breakdown.append([date, self.net_position, self.average_cost])
def _apply_fifo(self):
"""
This algorithm iterates over the trade history. It sets the
initial trade direction to get the initial open lots and then increases
or closes lots based on each trade.
In the event that a position was initially long and then becomes short, or
vice versa, the open lots will be increased or closed accordingly.
"""
if self.trade_history:
self.set_initial_trade()
else:
return []
trades = len(self.trade_history)
c1 = 1 # counter
while c1 < trades:
units = self.trade_history[c1].quantity
price = self.trade_history[c1].price
date = self.trade_history[c1].date
if units*self.net_position > 0: # if true both trades have the same sign
if self.open_direction == "long":
self.add("buy", units, price, date)
else:
self.add("sell", units, price, date)
elif units*self.net_position == 0: # position is flat
if units >= 0:
self.open_direction = "long"
self.add("buy", units, price, date)
else:
self.open_direction = "short"
self.add("sell", units, price, date)
else: # both trades are in different directions
if self.open_direction == "long":
self.add("sell", units, price, date)
# while the lots are not empty
while self.sell_quantities and self.buy_quantities:
if abs(self.sell_quantities[0]) >= self.buy_quantities[0]:
self.sell_quantities[0] += self.buy_quantities[0]
self.remove_trade("buy")
else:
temp = self.remove_trade("sell")
self.buy_quantities[0] += temp
self.net_position += units  # units is negative here, so this reduces the net position
else: # self.open_direction == "short"
self.add("buy", units, price, date)
while self.sell_quantities and self.buy_quantities:
if self.buy_quantities[0] >= abs(self.sell_quantities[0]):
self.buy_quantities[0] += self.sell_quantities[0]
self.remove_trade("sell")
else:
temp = self.remove_trade("buy")
self.sell_quantities[0] += temp
self.net_position += units
self._collapse_trade()
self._set_direction()
self.average_cost = round(self.get_average_cost(), 4)
self.net_position = self.total_open_lots()
self.breakdown.append([date, self.net_position, self.average_cost])
c1 += 1
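# --- Usage sketch (illustrative only) ----------------------------------------
# A minimal demonstration of the FIFO logic above, guarded so it never runs on
# import. The Trade namedtuple below is hypothetical: real trade_history rows
# come from the database, but they only need to support row[0] (the ticker)
# plus .quantity, .price and .date attributes, which a namedtuple provides.
if __name__ == "__main__":
    from datetime import date

    Trade = namedtuple("Trade", ["ticker", "quantity", "price", "date"])
    demo_history = [
        Trade("AAPL", 100, 10.0, date(2021, 1, 4)),  # open 100 long @ 10
        Trade("AAPL", 50, 12.0, date(2021, 1, 5)),   # add 50 @ 12
        Trade("AAPL", -75, 11.0, date(2021, 1, 6)),  # sell 75; FIFO closes the oldest lot first
    ]
    demo_position = PositionSummary(demo_history)
    # 25 units of the first lot remain plus the 50-unit lot:
    # (25 * 10 + 50 * 12) / 75 = 11.3333
    print(demo_position.get_summary())
    # -> Summary(ticker='AAPL', quantity=75, average_price=11.3333)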
class PositionAccounting(PositionSummary):
"""
Inherits from the Position Summary and applies accounting methods
to a Position
"""
def __init__(self, close_prices, trade_history):
super().__init__(trade_history)
self.close_prices = close_prices # Daily market prices
def performance_table(self):
"""
Combines the position breakdown with the daily prices to calculate
daily unrealised P&L. The Daily unrealised P&L is the difference
between the position's weighted average cost and the market
price.
"""
df = pd.DataFrame(self.close_prices, columns=["date", "price"])
df = df.set_index("date")
df["quantity"] = float("nan")
df["avg_cost"] = float("nan")
start_date = str(self.breakdown[0][0])
df2 = df.loc[start_date:]
df2 = df2.copy() # copied to prevent chained assignment
for row in self.breakdown:
df2.at[str(row[0]), "quantity"] = row[1]
df2.at[str(row[0]), "avg_cost"] = row[2]
df2["quantity"] = df2["quantity"].fillna(method="ffill")
df2["price"] = df2["price"].fillna(method="ffill")
df2["avg_cost"] = df2["avg_cost"].fillna(method="ffill")
df2["price"] = pd.to_numeric(df2["price"])
df2.loc[df2['quantity'] <= 0, 'Long/Short'] = -1
df2.loc[df2['quantity'] > 0, 'Long/Short'] = 1
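# Worked example of the formula below (illustrative numbers): a long position
# (Long/Short = 1) with avg_cost 100 and price 105 gives
# ((105 - 100) / 100) * 1 * 100 = 5.0, i.e. a +5% unrealised gain.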
df2["pct_change"] = (((df2["price"] - df2["avg_cost"])/df2["avg_cost"])*df2["Long/Short"])*100
df2["pct_change"] = round(df2["pct_change"], 3)
df2 = df2.reset_index()
df2 = df2[["date", "quantity", "avg_cost", "price", "pct_change"]]
df2 = list(df2.itertuples(index=False))
return df2
def daily_valuations(self):
"""
Combines the position breakdown with the daily prices to calculate
daily market value. The Daily market value is the positions quantity
multiplied by the market price.
"""
df = pd.DataFrame(self.close_prices, columns=["date", "price"])
df = df.set_index("date")
df["quantity"] = float("nan")
df["market_val"] = float("nan")
# the prices starting from the first date the security was held
start_date = str(self.breakdown[0][0])
df2 = df.loc[start_date:]
df2 = df2.copy() # copied to prevent chained assignment
# update the quantity at each date
for row in self.breakdown:
df2.at[str(row[0]), "quantity"] = row[1]
df2["price"] = df2["price"].fillna(method="ffill")
df2["quantity"] = df2["quantity"].fillna(method="ffill")
df2["price"] = pd.to_numeric(df2["price"])
df2["market_val"] = round((df2["price"] * df2["quantity"]), 3)
df2 = df2[["market_val"]]
new_name = f"market_val_{self.ticker}"
new_header = {"market_val": new_name}
df2 = df2.rename(columns=new_header)
return df2
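# Illustrative sketch of the inputs this class expects (inferred from the
# DataFrame construction above, not from documentation): ``close_prices`` is an
# iterable of (date, price) rows covering the holding period, e.g.
#   demo_prices = [("2021-01-04", "10.0"), ("2021-01-05", "12.0")]
#   PositionAccounting(demo_prices, demo_history).daily_valuations()
# returns a single "market_val_<ticker>" column indexed by date.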
class Portfolio_Summary(object):
"""
This is a collection of the Positions for the user accounts, priced as of
the latest market prices
"""
def __init__(self):
self.portfolio_breakdown = pd.DataFrame()
def add_position(self, close_prices, trade_history):
"""
Adds each positions daily market value to the portfolio breakdown.
"""
Position = PositionAccounting(close_prices, trade_history)
Position_valuation = Position.daily_valuations()
if self.portfolio_breakdown.empty:
self.portfolio_breakdown = Position_valuation
else:
self.portfolio_breakdown = self.portfolio_breakdown.join(Position_valuation)
self.portfolio_breakdown = self.portfolio_breakdown.fillna(method="ffill")
def net_valuations(self):
"""
returns the portfolios daily market value
"""
valuation = self.portfolio_breakdown.copy()
valuation["portfolio_val"] = valuation.sum(axis=1)
valuation = valuation[["portfolio_val"]]
return valuation
def convert_flows(self, flows):
"""
Using the Holding Period Return (HPR) methodology. Purchases of
securities are accounted for as fund inflows and sales of securities are
accounted for as increases in cash.
By creating the cumulative sum of these values we can maintain an
accurate calculation of the HPR, which would otherwise be distorted as
purchases and sales are added to the trades.
"""
df_flows = pd.DataFrame(flows, columns=["date", "flows"])
df_flows["cash"] = float("nan")
df_flows["inflows"] = float("nan")
df_flows["date"] = df_flows["date"].astype(str)
df_flows["cash"] = df_flows.loc[df_flows['flows'] > 0, "flows"]
df_flows["inflows"] = df_flows.loc[df_flows['flows'] <= 0, "flows"]
df_flows["cash"] = df_flows["cash"].cumsum()
df_flows["inflows"] = df_flows["inflows"].abs()
df_flows = df_flows.set_index("date") # need to sum groupby date
df_flows = df_flows.groupby([df_flows.index]).sum()
df_flows = df_flows.drop(columns=['flows'])
df_flows = df_flows.replace({'cash': 0, 'inflows': 0}, float("nan"))
return df_flows
def generate_hpr(self, flows):
"""
Where PortVal = Portfolio Value. The Formula for the Daily
Holding Period Return (HPR) is calculated as follows:
(Ending PortVal) / (Previous PortVal After Cash Flow) – 1.
1. Add the cash from the sale of securities to the portfolio value.
2. Shift the total portfolio value column to allow us to easily
calculate the percentage change before and after each cash flow.
Returns a named tuple of daily HPR % changes.
"""
df_flows = self.convert_flows(flows)
valuation = self.net_valuations()
valuation = valuation.join(df_flows)
valuation["cash"] = valuation["cash"].fillna(method="ffill")
valuation = valuation.fillna(value=0)
valuation["total_portfolio_val"] = valuation["portfolio_val"] + valuation["cash"]
valuation["portfolio_val"] = valuation["total_portfolio_val"].shift(1)
valuation["pct_change"] = ((valuation["total_portfolio_val"])/(valuation["portfolio_val"]+valuation["inflows"])-1)*100
valuation["pct_change"] = round(valuation["pct_change"], 3)
valuation = valuation.reset_index()
valuation = list(valuation.itertuples(index=False))
return valuation
class DashboardCharts(object):
"""
Various methods that take portfolio data from the database and clean the
data so that it can be plotted into graphs on the front end.
"""
def worldmap(map_data):
"""
Groups the user's positions by Country and ISO Code;
the aggregate function used here is COUNT.
"""
df = pd.DataFrame(map_data)
if not df.empty:
df = df.groupby(["Country", "ISO Code"]).count().reset_index()
return df
else:
return pd.DataFrame(columns=["No. of Positions",
"Country",
"ISO Code"])
def get_pie_chart(self, portfolio_valuation):
"""
Returns a named tuple of the largest positions by absolute exposure
in descending order. For portfolios that contain more than 6
positions, the remaining positions are aggregated and classified
as 'Other'.
"""
df = portfolio_valuation.tail(1)
df = df.T.reset_index() # transpose table to make the tickers the rows
if df.empty:
return df
new_headers = {df.columns[0]: "ticker", df.columns[1]: "Market_val"}
df = df.rename(columns=new_headers)
df["Market_val"] = abs(df["Market_val"])
total_portfolio_val = sum(df["Market_val"])
df["ticker"] = df["ticker"].replace("market_val_", "", regex=True)
df["market_val_perc"] = round(df["Market_val"]/total_portfolio_val, 2)
df = df[df["Market_val"] != 0] # filter rows where valuation isnt zero
df = df.sort_values(by=['market_val_perc'], ascending=False)
if len(df) >= 7:
# split the dataframe into two parts
df_bottom = df.tail(len(df)-6)
df = df.head(6)
# sum the bottom dataframe to create an "Other" field
df_bottom.loc['Other'] = df_bottom.sum(numeric_only=True, axis=0)
df_bottom.at["Other", "ticker"] = "Other"
df_bottom = df_bottom.tail(1)
df_final = pd.concat([df, df_bottom])
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
"""
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
from future.utils import iteritems
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
"""
Run always decorator to be used with Algo
to ensure stack runs the decorated Algo
on each pass, regardless of failures in the stack.
"""
f.run_always = True
return f
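# Example (illustrative, not from the original source): mark a function-style
# algo so an AlgoStack still calls it even when an earlier algo in the stack
# has returned False.
#
#   @run_always
#   def record_state(target):
#       target.temp.setdefault('seen_dates', []).append(target.now)
#       return True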
class PrintDate(Algo):
"""
This Algo simply prints the current date.
Can be useful for debugging purposes.
"""
def __call__(self, target):
print(target.now)
return True
class PrintTempData(Algo):
"""
This Algo prints the temp data.
Useful for debugging.
Args:
* fmt_string (str): A string that will later be formatted with the
target's temp dict. Therefore, you should provide
what you want to examine within curly braces ( { } )
"""
def __init__(self, fmt_string=None):
super(PrintTempData, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
if self.fmt_string:
print(self.fmt_string.format(**target.temp))
else:
print(target.temp)
return True
class PrintInfo(Algo):
"""
Prints out info associated with the target strategy. Useful for debugging
purposes.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's __dict__ attribute. Therefore, you should provide
what you want to examine within curly braces ( { } )
Ex:
PrintInfo('Strategy {name} : {now}')
This will print out the name and the date (now) on each call.
Basically, you provide a string that will be formatted with target.__dict__
"""
def __init__(self, fmt_string="{name} {now}"):
super(PrintInfo, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
print(self.fmt_string.format(**target.__dict__))
return True
class Debug(Algo):
"""
Utility Algo that calls pdb.set_trace when triggered.
In the debug session, 'target' is available and can be examined through the
StrategyBase interface.
"""
def __call__(self, target):
import pdb
pdb.set_trace()
return True
class RunOnce(Algo):
"""
Returns True on first run then returns False.
As the name says, the algo only runs once. Useful in situations
where we want to run the logic once (buy and hold for example).
"""
def __init__(self):
super(RunOnce, self).__init__()
self.has_run = False
def __call__(self, target):
# if it hasn't run then we will
# run it and set flag
if not self.has_run:
self.has_run = True
return True
# return false to stop future execution
return False
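# Example (illustrative): RunOnce is typically placed at the head of a
# buy-and-hold strategy so the allocation logic fires only on the first pass.
# This mirrors the standard bt recipe and assumes the usual bt.algos classes:
#
#   s = bt.Strategy('buy_and_hold',
#                   [RunOnce(), SelectAll(), WeighEqually(), Rebalance()])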
class RunPeriod(Algo):
def __init__(
self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
):
super(RunPeriod, self).__init__()
self._run_on_first_date = run_on_first_date
self._run_on_end_of_period = run_on_end_of_period
self._run_on_last_date = run_on_last_date
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# not a known date in our universe
if now not in target.data.index:
return False
# get index of the current date
index = target.data.index.get_loc(target.now)
result = False
# index 0 is a date added by the Backtest Constructor
if index == 0:
return False
# first date
if index == 1:
if self._run_on_first_date:
result = True
# last date
elif index == (len(target.data.index) - 1):
if self._run_on_last_date:
result = True
else:
# create pandas.Timestamp for useful .week,.quarter properties
now = pd.Timestamp(now)
index_offset = -1
if self._run_on_end_of_period:
index_offset = 1
date_to_compare = target.data.index[index + index_offset]
date_to_compare = pd.Timestamp(date_to_compare)
import os
from uuid import uuid4
import pytest
from thrift.transport import TSocket, TTransport
from thrift.transport.TSocket import TTransportException
from heavyai import connect
import datetime
import random
import string
import numpy as np
import pandas as pd
heavydb_host = os.environ.get('HEAVYDB_HOST', 'localhost')
def _check_open():
"""
Test to see if OmniSci is running on localhost and the socket is open
"""
socket = TSocket.TSocket(heavydb_host, 6274)
transport = TTransport.TBufferedTransport(socket)
try:
transport.open()
return True
except TTransportException:
return False
@pytest.fixture(scope='session')
def mapd_server():
"""Ensure a mapd server is running, optionally starting one if none"""
if _check_open():
# already running before pytest started
pass
else:
raise RuntimeError(
"Unable to connect to OmniSci server at {}".format(heavydb_host)
)
@pytest.fixture(scope='session')
def con(mapd_server):
"""
Fixture to provide Connection for tests run against live OmniSci instance
"""
return connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6274,
protocol='binary',
dbname='omnisci',
)
@pytest.fixture
def mock_client(mocker):
"""A magicmock for heavydb.connection.Client"""
return mocker.patch("heavydb.connection.Client")
def no_gpu():
"""Check for the required GPU dependencies"""
try:
from numba import cuda
import cudf # noqa
try:
cuda.select_device(0)
except cuda.cudadrv.error.CudaDriverError:
return True
except ImportError:
return True
return False
def gen_string():
"""Generate a random string sequence for use in _tests_table_no_nulls"""
return ''.join(
[
random.choice(string.ascii_letters + string.digits)
for n in range(10)
]
)
def _tests_table_no_nulls(n_samples):
"""
Generates a dataframe with all OmniSci types in it for use in integration
testing
"""
np.random.seed(12345)
tinyint_ = np.random.randint(
low=-127, high=127, size=n_samples, dtype='int8'
)
smallint_ = np.random.randint(
low=-32767, high=32767, size=n_samples, dtype='int16'
)
int_ = np.random.randint(
low=-2147483647, high=2147483647, size=n_samples, dtype='int32'
)
bigint_ = np.random.randint(
low=-9223372036854775807,
high=9223372036854775807,
size=n_samples,
dtype='int64',
)
# float and double ranges slightly lower than we support, full width
# causes an error in np.linspace that's not worth tracking down
float_ = np.linspace(-3.4e37, 3.4e37, n_samples, dtype='float32')
double_ = np.linspace(-1.79e307, 1.79e307, n_samples, dtype='float64')
bool_ = np.random.randint(low=0, high=2, size=n_samples, dtype='bool')
# effective date range of 1904 to 2035
# TODO: validate if this is an Arrow limitation, outside this range fails
date_ = [
datetime.date(1970, 1, 1) + datetime.timedelta(days=int(x))
for x in np.random.randint(-24000, 24000, size=n_samples)
]
datetime_ = [
datetime.datetime(1970, 1, 1)
+ datetime.timedelta(days=int(x), minutes=int(x))
for x in np.random.randint(-24000, 24000, size=n_samples)
]
time_h = np.random.randint(0, 24, size=n_samples)
time_m = np.random.randint(0, 60, size=n_samples)
time_s = np.random.randint(0, 60, size=n_samples)
time_ = [datetime.time(h, m, s) for h, m, s in zip(time_h, time_m, time_s)]
# generate random text strings
text_ = [gen_string() for x in range(n_samples)]
# read geo data from files
point_ = pd.read_csv("tests/data/points_10000.zip", header=None).values
point_ = np.squeeze(point_)
line_ = pd.read_csv("tests/data/lines_10000.zip", header=None).values
line_ = np.squeeze(line_)
mpoly_ = pd.read_csv("tests/data/mpoly_10000.zip", header=None).values
mpoly_ = np.squeeze(mpoly_)
poly_ = pd.read_csv("tests/data/polys_10000.zip", header=None)
import pandas as pd
import time
def patient(rdb):
""" Returns list of patients """
patients = """SELECT "Name" FROM patient ORDER BY index"""
try:
patients = pd.read_sql(patients, rdb)
patients = patients["Name"].values.tolist()
except:
patients = ['Patient']
return patients
def label(rdb):
""" Returns list of parameter for linear and bar drop down """
sql = """SELECT type FROM name WHERE type IN ('Heart Rate','Heart Rate Variability SDNN', 'Resting Heart Rate',
'VO2 Max','Walking Heart Rate Average')"""
sql2 = """SELECT type FROM name WHERE type NOT IN ('Heart Rate','Heart Rate Variability SDNN',
'Resting Heart Rate','VO2 Max','Walking Heart Rate Average')"""
try:
df, df2 = pd.read_sql(sql, rdb), pd.read_sql(sql2, rdb)
label_linear, label_bar = df["type"].values.tolist(), df2["type"].values.tolist()
except:
label_linear, label_bar = [], []
return label_linear, label_bar
def month(rdb, patient):
""" Returns list of months in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date",'YYYY-MM') AS month
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY month""".format(patient)
try:
df = pd.read_sql(sql, rdb)
months = df['month'].to_list()
except:
months = []
return months
def week(rdb, patient):
""" Returns list of weeks in database for selected patient """
sql = """SELECT DISTINCT TO_CHAR("Date", 'IYYY/IW') AS week
FROM applewatch_numeric
WHERE "Name"='{}'
AND type ='Resting Heart Rate'
ORDER BY week """.format(patient)
try:
df = pd.read_sql(sql, rdb)
weeks = df['week'].to_list()
except:
weeks = []
return weeks
def min_max_date(rdb, patient):
""" Returns min and max date for selected patient """
sql = """SELECT min_date,max_date FROM patient WHERE "Name"='{}'""".format(patient)
try:
df = pd.read_sql(sql, rdb)
min_date, max_date = df['min_date'].iloc[0].date(), df['max_date'].iloc[0].date()
except:
min_date, max_date = '', ''
return min_date, max_date
def age_sex(rdb, patient):
""" Returns age and gender for selected patient"""
sql = """SELECT "Age","Sex" from patient where "Name"='{}' """.format(patient)
try:
df = pd.read_sql(sql, rdb)
age, sex = df['Age'][0], df['Sex'][0]
except:
age, sex = '', ''
return age, sex
def classification_ecg(rdb, patient):
""" Returns ecg classification for patient information card """
sql = """SELECT "Classification",count(*) FROM ecg WHERE "Patient"='{}' GROUP BY "Classification" """.format(patient)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def number_of_days_more_6(rdb, patient):
""" Returns number of days the patient had the Apple Watch on their hand for more than 6 hours"""
sql = """SELECT count (*)
FROM (SELECT "Date"::date
FROM applewatch_categorical
WHERE "Name" = '{}'
AND "type" = 'Apple Stand Hour'
GROUP BY "Date"::date
HAVING count("Date"::date) > 6) days """.format(patient)
try:
df = pd.read_sql(sql, rdb)
df = df.iloc[0]['count']
except:
df = '0'
return df
def card(rdb, patient, group, date, value):
""" Returns DataFrame with resting, working, mean hear rate, step count, exercise time, activity for the cards """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM') """
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date", 'Day')) """
group_by = "DOW"
else:
to_char = """ "Date"::date """
group_by = "date"
value = date
sql = """SELECT {0} AS {3},type,
CASE
WHEN type in ('Active Energy Burned','Step Count','Apple Exercise Time') THEN SUM("Value")
WHEN type in ('Heart Rate','Walking Heart Rate Average','Resting Heart Rate') THEN AVG("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND type in ('Active Energy Burned','Step Count','Apple Exercise Time','Heart Rate',
'Walking Heart Rate Average','Resting Heart Rate')
AND {0}='{2}'
GROUP BY {3},type""".format(to_char, patient, value, group_by)
try:
df = pd.read_sql(sql, rdb)
df["Value"] = df["Value"].round(2)
except:
df = pd.DataFrame()
return df
def table(rdb, patient, group, linear, bar):
""" Returns a table with the patient and parameters that were selected from drop downs """
if isinstance(linear, list):
linear = "'" + "','".join(linear) + "'"
else:
linear = "'" + linear + "'"
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """ TRIM(TO_CHAR("Date",'Day')) """
group_by = ' "DOW" '
else:
to_char = """ "Date"::date """
group_by = "date"
sql = """SELECT {0} as {4},"type",
CASE WHEN type IN ('Heart Rate','Heart Rate Variability SDNN','Resting Heart Rate','VO2 Max',
'Walking Heart Rate Average') THEN AVG("Value") ELSE SUM("Value")
END AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{1}'
AND "type" in ({2},'{3}')
GROUP BY {0},type
ORDER BY "type",{4} """.format(to_char, patient, linear, bar, group_by)
try:
df = pd.read_sql(sql, rdb)
if group == 'DOW':
cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
df['DOW'] = pd.Categorical(df['DOW'], categories=cats, ordered=True)
df = df.sort_values('DOW')
group_by = "DOW"
df = df.pivot(index=group_by, columns='type', values='Value').reset_index()
except:
df = pd.DataFrame()
return df, group_by
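# --- Illustrative sketch (not part of the original app) ----------------------
# The table() helper above relies on an ordered pandas Categorical so that
# day-of-week labels sort chronologically (Monday..Sunday) instead of
# alphabetically before pivoting. The helper below is a hypothetical,
# self-contained demo of just that trick on made-up values.
def _example_dow_ordering():
    import pandas as pd
    demo = pd.DataFrame({'DOW': ['Sunday', 'Monday', 'Friday'],
                         'type': ['Heart Rate', 'Heart Rate', 'Step Count'],
                         'Value': [72.0, 65.0, 8432.0]})
    cats = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    demo['DOW'] = pd.Categorical(demo['DOW'], categories=cats, ordered=True)
    # sort_values now respects weekday order; pivot gives one column per type
    demo = demo.sort_values('DOW')
    return demo.pivot(index='DOW', columns='type', values='Value').reset_index()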
def day_figure(rdb, patient, bar, date):
""" Returns DataFrame for day figure with heart rate and selected parameter and patient """
sql = """ SELECT "Date","type","Value"
FROM applewatch_numeric
WHERE "Name" = '{}'
AND "Date"::date='{}'
AND "type" in ('Heart Rate','{}')
ORDER BY "type","Date" """.format(patient, date, bar)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def trend_figure(rdb, patient, group, start_date, end_date):
""" Returns DataFrame for trend figure """
if group == 'M':
to_char = """ TO_CHAR("Date",'YYYY-MM')"""
group_by = "month"
elif group == 'W':
to_char = """ TO_CHAR("Date", 'IYYY/IW') """
group_by = "week"
elif group == 'DOW':
to_char = """TRIM(TO_CHAR("Date", 'Day')) """
group_by = """ "DOW" """
else:
to_char = """ "Date"::date """
group_by = "date"
""" TRIM(TO_CHAR("Date", 'Day')) in ()"""
sql = """SELECT {0} as {1},extract('hour' from "Date") as hour,AVG("Value") AS "Value"
FROM applewatch_numeric
WHERE "Name" = '{2}'
AND type='Heart Rate'
AND "Date" BETWEEN '{3}' AND '{4}'
GROUP BY {0},extract('hour' from "Date")
ORDER BY {1},hour """.format(to_char, group_by, patient, start_date, end_date)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
# Query data for ECG_analyse
def ecgs(rdb, patient):
""" Returns DataFrame for table_ecg"""
sql2 = """SELECT "Day","Date"::time AS Time, "Classification"
FROM ecg
WHERE "Patient"='{}'
ORDER BY "Day" """.format(patient)
try:
df = pd.read_sql(sql2, rdb)
except:
df = pd.DataFrame()
return df
def ecg_data(rdb, day, patient, time):
""" Returns DatFrame to plot ecg signal """
sql = """SELECT * FROM ECG where "Day"='{0}' and "Patient"='{1}' and "Date"::time='{2}' """.format(day, patient, time)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def table_hrv(rdb):
""" Returns DataFrame with all information about ecg ann calculate HRV feature for time and frequency domain """
sql = """ SELECT "Patient","Day","Date"::time as Time, "hrvOwn", "SDNN", "SENN", "SDSD", "pNN20", "pNN50", "lf",
"hf", "lf_hf_ratio","total_power", "vlf", "Classification" FROM ecg ORDER BY "Patient","Day" """
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def scatter_plot_ecg(rdb, x_axis, y_axis):
""" Returns DataFrame for scatter plot with patients ids/numbers and selected features """
sql = """ SELECT "Patient","{0}","{1}" FROM ecg """.format(x_axis, y_axis)
try:
df = pd.read_sql(sql, rdb)
except:
df = pd.DataFrame()
return df
def box_plot_ecg(rdb, x_axis):
""" Returns DataFrame for box plot with patients ids/numbers and selected feature """
sql = """ SELECT "Patient","{}" FROM ecg """.format(x_axis)
try:
df = pd.read_sql(sql, rdb)
except:
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_scatter(latent_code, output_path,
label_file='data/PANCAN/GDC-PANCAN_both_samples_tumour_type.tsv',
colour_file='data/TCGA_colors_obvious.tsv', latent_code_dim=2, have_label=True):
if latent_code_dim <= 3:
if latent_code_dim == 3:
# Plot the 3D scatter graph of latent space
if have_label:
# Set sample label
disease_id = pd.read_csv(label_file, sep='\t', index_col=0)
latent_code_label = pd.merge(latent_code, disease_id, left_index=True, right_index=True)
colour_setting = pd.read_csv(colour_file, sep='\t')
fig = plt.figure(figsize=(8, 5.5))
ax = fig.add_subplot(111, projection='3d')
for index in range(len(colour_setting)):
code = colour_setting.iloc[index, 1]
colour = colour_setting.iloc[index, 0]
if code in latent_code_label.iloc[:, latent_code_dim].unique():
latent_code_label_part = latent_code_label[latent_code_label.iloc[:, latent_code_dim] == code]
ax.scatter(latent_code_label_part.iloc[:, 0], latent_code_label_part.iloc[:, 1],
latent_code_label_part.iloc[:, 2], s=2, marker='o', alpha=0.8, c=colour, label=code)
ax.legend(ncol=2, markerscale=4, bbox_to_anchor=(1, 0.9), loc='upper left', frameon=False)
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(latent_code.iloc[:, 0], latent_code.iloc[:, 1], latent_code.iloc[:, 2], s=2, marker='o',
alpha=0.8)
ax.set_xlabel('First Latent Dimension')
ax.set_ylabel('Second Latent Dimension')
ax.set_zlabel('Third Latent Dimension')
elif latent_code_dim == 2:
if have_label:
# Set sample label
disease_id = | pd.read_csv(label_file, sep='\t', index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# finpie - a simple library to download some financial data
# https://github.com/peterlacour/finpie
#
# Copyright (c) 2020 <NAME>
#
# Licensed under the MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import pandas as pd
from finpie.base import DataBase
class MwatchData( DataBase ):
def __init__(self, ticker, freq = 'A', countryCode = ''):
DataBase.__init__(self)
self.ticker = ticker
self.countryCode = countryCode
self.freq = freq
def _download(self, sheet):
'''Download the requested financial statement page from MarketWatch and return it as a DataFrame.'''
if self.freq.lower() == 'annual' or self.freq.lower() == 'a':
if self.countryCode != '':
url = f'https://www.marketwatch.com/investing/stock/{self.ticker}/financials/{sheet}?countrycode={self.countryCode}'
else:
url = f'https://www.marketwatch.com/investing/stock/{self.ticker}/financials/{sheet}'
elif self.freq.lower() == 'quarterly' or self.freq.lower() == 'q':
if self.countryCode != '':
url = f'https://www.marketwatch.com/investing/stock/{self.ticker}/financials/{sheet}/quarter?countrycode={self.countryCode}'
else:
url = f'https://www.marketwatch.com/investing/stock/{self.ticker}/financials/{sheet}/quarter'
else:
print('Please specify annual or quarterly frequency.')
return None
soup = self._get_session(url)
df = pd.concat( [ pd.read_html(str(s.find('table')))[0] for s in soup.find_all('div', class_='financials') ] )
df = df.astype(str)
df.iloc[:,0][ df.iloc[:,0] == 'nan' ]= df[ df.iloc[:,0] == 'nan' ].iloc[:,-1]
df = df.iloc[:,:-1]
df = df.transpose()
df.index.name = 'date'
df.columns = df.iloc[0]
df = df[1:]
df.columns.name = ''
if self.freq.lower() == 'quarterly' or self.freq.lower() == 'q':
df.index = pd.to_datetime(df.index)
df.replace('-', np.nan, inplace = True)
df.replace('\(', '-', regex = True, inplace = True)
df.replace('\)', '', regex = True, inplace = True)
# rename duplicate columns
columns = | pd.io.parsers.ParserBase({'names':df.columns}) | pandas.io.parsers.ParserBase |
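# --- Illustrative note (not part of finpie) -----------------------------------
# pd.io.parsers.ParserBase is an internal pandas class whose location has changed
# between pandas versions, so the duplicate-column handling above can break on
# upgrades. A dependency-free sketch of the same idea (suffixing repeated column
# names) is shown below; the helper name is hypothetical.
def _dedupe_columns(columns):
    """Return column names with '.1', '.2', ... appended to duplicates."""
    seen, out = {}, []
    for name in columns:
        if name in seen:
            seen[name] += 1
            out.append(f'{name}.{seen[name]}')
        else:
            seen[name] = 0
            out.append(name)
    return out
# e.g. _dedupe_columns(['EPS', 'EPS', 'Sales']) -> ['EPS', 'EPS.1', 'Sales']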
import argparse
from autogluon.tabular import TabularDataset, TabularPredictor
from autogluon.tabular.models import CatBoostModel, KNNModel, LGBModel, XGBoostModel, TabularNeuralNetModel, RFModel
import os
from numpy.core.fromnumeric import trace
import pandas as pd
import traceback
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset_dir', help='path to cpp directory', type=str, default='dataset/cpp')
parser.add_argument('-p', '--problem', help='only run this problem if specified', type=str, default=None)
parser.add_argument('-r', '--result_path', help='file to save test set score to', type=str, default='sanitycheck/cpp/result.csv')
parser.add_argument('-m', '--mode', help='what AutoGluon setting to try', choices=['ag', 'ag-stack'], default='ag-stack')
parser.add_argument('-t', '--time_limit', help='time limit in minutes', type=int, default=60)
args = parser.parse_args()
# DATASETS = [
# "1db99236-0601-4e03-b8bb-96b5eb236d74",
# "20e6e8e8-a4da-4fea-a9de-c784cdf84c1f",
# "2cbd9a22-0da1-404d-a7ba-49911840a622",
# "3cf28e5f-886a-4ace-bebf-299e1dbde654",
# "4dbb8031-56a6-43bf-9e03-40ea2affa163",
# "5729f07d-8d43-463d-894b-7dfa2da63efb",
# "5d1e3461-8b01-463c-a9db-2e4c48db1467",
# "60c60200-2341-427d-b0ec-2fc30c4bfdd8",
# ]
TIME_LIMIT = args.time_limit * 60.
RESULT_PATH = args.result_path
EXCEPTIONS_PATH = os.path.join(os.path.dirname(args.result_path), 'exceptions.csv')
if args.problem is None:
DATASETS = sorted([dataset for dataset in os.listdir(args.dataset_dir) if not dataset.startswith('.')])[1:]
else:
DATASETS = [args.problem]
FEATURE_PRUNE_KWARGS = {}
def add_datapoint(result: dict, dataset: str, mode: str, val_score: float, test_score: float, time_limit: float, n_sample: int, n_feature: int):
result['dataset'].append(dataset)
result['mode'].append(mode)
result['val_score'].append(round(val_score, 4))
result['test_score'].append(round(test_score, 4))
result['time_limit'].append(round(time_limit, 4))
result['n_sample'].append(n_sample)
result['n_feature'].append(n_feature)
def add_exception(exception: dict, dataset: str, type: str, error_str: str, stacktrace: str):
exception['dataset'].append(dataset)
exception['type'].append(type)
exception['error_str'].append(error_str)
exception['stacktrace'].append(stacktrace)
for dataset in DATASETS:
train_data = pd.read_csv(os.path.join(args.dataset_dir, dataset, 'train.csv'))
test_data = pd.merge(pd.read_csv(os.path.join(args.dataset_dir, dataset, 'testFeaturesNoLabel.csv')),
pd.read_csv(os.path.join(args.dataset_dir, dataset, 'testLabel.csv')), on='ID')
y_test = test_data['label']
presets = ['medium_quality_faster_train'] if args.mode == 'ag' else ['best_quality']
n_sample, n_feature = len(train_data), len(train_data.columns) - 1
result = {'dataset': [], 'mode': [], 'val_score': [], 'test_score': [], 'time_limit': [], 'n_sample': [], 'n_feature': []}
exception = {'dataset': [], 'type': [], 'error_str': [], 'stacktrace': []}
try:
predictor = TabularPredictor(label='label', eval_metric='roc_auc')
predictor = predictor.fit(train_data, presets=presets, time_limit=TIME_LIMIT, ag_args_fit=dict(num_cpu=8))
leaderboard = predictor.leaderboard(test_data)
best_val_row = leaderboard.loc[leaderboard['score_val'].idxmax()]
val_score, test_score = best_val_row['score_val'], best_val_row['score_test']
add_datapoint(result, dataset, presets[0], val_score, test_score, TIME_LIMIT, n_sample, n_feature)
except Exception as e:
add_exception(exception, dataset, presets[0], str(e), traceback.format_exc())
try:
predictor = TabularPredictor(label='label', eval_metric='roc_auc')
predictor = predictor.fit(train_data, presets=presets, time_limit=TIME_LIMIT, ag_args_fit=dict(num_cpu=8), feature_prune_kwargs=FEATURE_PRUNE_KWARGS)
leaderboard = predictor.leaderboard(test_data)
best_val_row = leaderboard.loc[leaderboard['score_val'].idxmax()]
val_score, test_score = best_val_row['score_val'], best_val_row['score_test']
add_datapoint(result, dataset, presets[0] + "_prune", val_score, test_score, TIME_LIMIT, n_sample, n_feature)
except Exception as e:
add_exception(exception, dataset, presets[0] + "_prune", str(e), traceback.format_exc())
result_df = | pd.DataFrame(result) | pandas.DataFrame |
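# Illustrative continuation (not in the original script): the collected frames can
# be written to the paths defined above, e.g.
#   result_df.to_csv(RESULT_PATH, index=False)
#   pd.DataFrame(exception).to_csv(EXCEPTIONS_PATH, index=False)
# Note that `result` and `exception` are re-initialised inside the loop above, so
# as written they only hold entries for the most recent dataset.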
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 11:57:27 2015
@author: malte
"""
import numpy as np
import pandas as pd
from scipy import sparse
import implicit
class ImplicitNN:
'''
ImplicitNN(factors=100, epochs=15, reg=0.03, steps=None, weighting='same', session_key = 'playlist_id', item_key = 'track_id')
Using the implicit library to find nearest neighbors in terms of the items or tracks respectively
Parameters
--------
'''
def __init__(self, factors=100, epochs=15, reg=0.03, steps=None, weighting='same', session_key = 'playlist_id', item_key = 'track_id'):
self.factors = factors
self.epochs = epochs
self.reg = reg
self.steps = steps
self.weighting = weighting
self.session_key = session_key
self.item_key = item_key
self.current_session = None
def train(self, train, test=None):
'''
Trains the predictor.
Parameters
--------
data: pandas.DataFrame
Training data. It contains the transactions of the sessions. It has one column for session IDs, one for item IDs and one for the timestamp of the events (unix timestamps).
It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).
'''
data = train['actions']
datat = test['actions']
data = pd.concat([data,datat])
itemids = data[self.item_key].unique()
self.n_items = len(itemids)
self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
self.itemidmap2 = pd.Series(index=np.arange(self.n_items), data=itemids)
sessionids = data[self.session_key].unique()
self.n_sessions = len(sessionids)
self.useridmap = pd.Series(data=np.arange(self.n_sessions), index=sessionids)
ones = np.ones( len(data) )
row_ind = self.itemidmap[ data.track_id.values ]
col_ind = self.useridmap[ data.playlist_id.values ]
self.mat = sparse.csr_matrix((ones, (row_ind, col_ind)))
#self.model = implicit.als.AlternatingLeastSquares( factors=self.factors, iterations=self.epochs, regularization=self.reg )
self.model = implicit.approximate_als.NMSLibAlternatingLeastSquares( factors=self.factors, iterations=self.epochs, regularization=self.reg )
#self.model = implicit.nearest_neighbours.CosineRecommender()
self.model.fit(self.mat)
self.tmp = self.mat.T.tocsr()
def predict( self, name=None, tracks=None, playlist_id=None, artists=None, num_hidden=None ):
'''
Gives prediction scores for a selected set of items on how likely they are to be the next item in the session.
Parameters
--------
name : int or string
The session IDs of the event.
tracks : int list
The item ID of the event. Must be in the set of item IDs of the training set.
Returns
--------
res : pandas.DataFrame
Prediction scores for selected items on how likely to be the next item of this session. Indexed by the item IDs.
'''
items = tracks if tracks is not None else []
sim_list = | pd.Series() | pandas.Series |
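# --- Illustrative sketch (not part of the recommender above) ------------------
# train() builds an items-by-playlists CSR matrix by mapping raw ids to
# consecutive integers with pd.Series and handing the coordinates to
# scipy.sparse.csr_matrix. A minimal, self-contained version of that pattern
# (all values made up):
def _example_interaction_matrix():
    import numpy as np
    import pandas as pd
    from scipy import sparse
    interactions = pd.DataFrame({'playlist_id': ['p1', 'p1', 'p2'],
                                 'track_id': ['t9', 't7', 't9']})
    itemids = interactions['track_id'].unique()
    userids = interactions['playlist_id'].unique()
    itemidmap = pd.Series(np.arange(len(itemids)), index=itemids)
    useridmap = pd.Series(np.arange(len(userids)), index=userids)
    ones = np.ones(len(interactions))
    row_ind = itemidmap[interactions['track_id'].values]
    col_ind = useridmap[interactions['playlist_id'].values]
    # rows = tracks, columns = playlists, values = implicit feedback counts
    return sparse.csr_matrix((ones, (row_ind, col_ind)))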
from flask import Flask, Markup, render_template
import pandas as pd
import json
from sentiment_score_calculator import get_and_process_tweets
final_list = get_and_process_tweets()
#print(len(final_list))
list_values = [val for d in final_list for val in d.values()]
list_values = list_values[::-4]
#json_list = []
#for x in range(len(list_values)):
#json_list.append([x,list_values[x]])
json_list = [[x,list_values[x]] for x in range(len(list_values))]
#print(json_list)
df_json = pd.DataFrame(json_list)
#print(df_json.head(10))
#df_json.to_json("static/data/sentiment_score.json")
with open('static/data/sentiment_score.json', 'w') as F:
F.write(json.dumps(json_list))
col_series = | pd.Series(['variable 1','variable 2', 'variable 3', 'variable 4', 'variable 5']) | pandas.Series |
import functools
import itertools
import itertools as it
import logging
import shutil
import warnings
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import dask.dataframe as dd
import numba as nb
import numpy as np
import pandas as pd
from sid.config import BOOLEAN_STATE_COLUMNS
from sid.config import DTYPE_COUNTDOWNS
from sid.config import DTYPE_GROUP_CODE
from sid.config import DTYPE_IMMUNITY
from sid.config import DTYPE_INFECTION_COUNTER
from sid.config import SAVED_COLUMNS
from sid.contacts import calculate_contacts
from sid.contacts import calculate_infections_by_contacts
from sid.contacts import create_group_indexer
from sid.contacts import post_process_contacts
from sid.countdowns import COUNTDOWNS
from sid.events import calculate_infections_by_events
from sid.initial_conditions import (
sample_initial_distribution_of_infections_and_immunity,
)
from sid.matching_probabilities import create_cumulative_group_transition_probabilities
from sid.parse_model import parse_duration
from sid.parse_model import parse_initial_conditions
from sid.parse_model import parse_virus_strains
from sid.pathogenesis import draw_course_of_disease
from sid.policies import apply_contact_policies
from sid.rapid_tests import apply_reactions_to_rapid_tests
from sid.rapid_tests import perform_rapid_tests
from sid.seasonality import prepare_seasonality_factor
from sid.shared import factorize_assortative_variables
from sid.shared import separate_contact_model_names
from sid.susceptibility import prepare_susceptibility_factor
from sid.testing import perform_testing
from sid.time import timestamp_to_sid_period
from sid.update_states import update_states
from sid.vaccination import vaccinate_individuals
from sid.validation import validate_contact_models
from sid.validation import validate_contact_policies
from sid.validation import validate_initial_states
from sid.validation import validate_params
from sid.validation import validate_prepared_initial_states
from sid.validation import validate_testing_models
from sid.validation import validate_vaccination_models
from sid.virus_strains import prepare_virus_strain_factors
from tqdm import tqdm
logger = logging.getLogger("sid")
def get_simulate_func(
params: pd.DataFrame,
initial_states: pd.DataFrame,
contact_models: Dict[str, Any],
duration: Optional[Dict[str, Any]] = None,
events: Optional[Dict[str, Any]] = None,
contact_policies: Optional[Dict[str, Any]] = None,
testing_demand_models: Optional[Dict[str, Any]] = None,
testing_allocation_models: Optional[Dict[str, Any]] = None,
testing_processing_models: Optional[Dict[str, Any]] = None,
seed: Optional[int] = None,
path: Union[str, Path, None] = None,
saved_columns: Optional[Dict[str, Union[bool, str, List[str]]]] = None,
initial_conditions: Optional[Dict[str, Any]] = None,
susceptibility_factor_model: Optional[Callable] = None,
virus_strains: Optional[List[str]] = None,
vaccination_models: Optional[Callable] = None,
rapid_test_models: Optional[Dict[str, Dict[str, Any]]] = None,
rapid_test_reaction_models: Optional[Dict[str, Dict[str, Any]]] = None,
seasonality_factor_model: Optional[Callable] = None,
derived_state_variables: Optional[Dict[str, str]] = None,
period_outputs: Optional[Dict[str, Callable]] = None,
return_time_series: bool = True,
return_last_states: bool = True,
):
"""Get a function that simulates the spread of an infectious disease.
The resulting function only depends on parameters. The computational time it takes
to process the user input is only incurred once in :func:`get_simulate_func` and not
when the resulting function is called.
Args:
params (pandas.DataFrame): ``params`` is a DataFrame with a three-level index
which contains parameters for various aspects of the model. For example,
infection probabilities of contact models, multiplier effects of policies,
determinants of the course of the disease. More information can be found in
:ref:`params`.
initial_states (pandas.DataFrame): The initial states are a DataFrame which
contains individuals and their characteristics. More information can be
found in :ref:`states`.
contact_models (Dict[str, Any]): A dictionary of dictionaries where each
dictionary describes a channel by which contacts can be formed. More
information can be found in :ref:`contact_models`.
duration (Optional[Dict[str, Any]]): A dictionary which contains keys and values
suited to be passed to :func:`pandas.date_range`. Only the first three
arguments, ``"start"``, ``"end"``, and ``"periods"``, are allowed.
events (Optional[Dict[str, Any]]): Dictionary of events which cause infections.
contact_policies (Optional[Dict[str, Any]]): Dict of dicts with contact. See
:ref:`policies`.
testing_demand_models (Optional[Dict[str, Any]]): Dict of dicts with demand
models for tests. See :ref:`testing_demand_models` for more information.
testing_allocation_models (Optional[Dict[str, Any]]): Dict of dicts with
allocation models for tests. See :ref:`testing_allocation_models` for more
information.
testing_processing_models (Optional[Dict[str, Any]]): Dict of dicts with
processing models for tests. See :ref:`testing_processing_models` for more
information.
seed (Optional[int]): The seed is used as the starting point for two seed
sequences where one is used to set up the simulation function and the other
seed sequence is used within the simulation and reset every parameter
evaluation. If you pass ``None`` as a seed, an internal seed is sampled to
set up the simulation function. The seed for the simulation is sampled at
the beginning of the simulation function and can be influenced by setting
:class:`numpy.random.seed` right before the call.
path (Union[str, pathlib.Path, None]): Path to the directory where the simulated
data is stored.
saved_columns (Optional[Dict[str, Union[bool, str, List[str]]]]): Dictionary with
categories of state columns. The corresponding values can be True, False or
Lists with columns that should be saved. Typically, during estimation you
only want to save exactly what you need to calculate moments to make the
simulation and calculation of moments faster. The categories are
"initial_states", "disease_states", "testing_states", "countdowns",
"contacts", "countdown_draws", "group_codes" and "other".
initial_conditions (Optional[Dict[str, Any]]): The initial conditions allow you
to govern the distribution of infections and immunity and the heterogeneity
of courses of disease at the start of the simulation. Use ``None`` to assume
no heterogeneous courses of diseases and 1% infections. Otherwise,
``initial_conditions`` is a dictionary containing the following entries:
- ``assort_by`` (Optional[Union[str, List[str]]]): The relative share of
infections is preserved between the groups formed by the ``assort_by``
variables. By default, no groups are formed and infections spread across
the whole population.
- ``burn_in_periods`` (int): The number of periods over which infections are
distributed and can progress. The default is one period.
- ``growth_rate`` (float): The growth rate specifies the increase of
infections from one burn-in period to the next. For example, two indicates
doubling case numbers every period. The value must be greater than or
equal to one. Default is one which is no distribution over time.
- ``initial_immunity`` (Union[int, float, pandas.Series]): The number of
people who are immune at the beginning can be specified as an integer for
the count, a float between 0 and 1 for the share, or a :class:`pandas.Series` with
the same index as states. Note that infected individuals are also immune.
For a 10% pre-existing immunity with 2% currently infected people, set the
key to 0.12. By default, only infected individuals indicated by the
initial infections are immune.
- ``initial_infections`` (Union[int, float, pandas.Series,
pandas.DataFrame]): The initial infections can be given as an integer
which is the number of randomly infected individuals, as a float for the
share or as a :class:`pandas.Series` which indicates whether an
individuals is infected. If initial infections are a
:class:`pandas.DataFrame`, then, the index is the same as ``states``,
columns are dates or periods which can be sorted, and values are infected
individuals on that date. This step will skip upscaling and distributing
infections over days and directly jump to the evolution of states. By
default, 1% of individuals is infected.
- ``known_cases_multiplier`` (int): The factor can be used to scale up the
initial infections while keeping shares between ``assort_by`` variables
constant. This is helpful if official numbers are underreporting the
number of cases.
- ``virus_shares`` (Union[dict, pandas.Series]): A mapping between the names
of the virus strains and their share among newly infected individuals in
each burn-in period.
susceptibility_factor_model (Optional[Callable]): A function which
takes the states and parameters and returns an infection probability
multiplier for each individual.
virus_strains (Optional[List[str]]): A list of names indicating the different
virus strains used in the model. Their different contagiousness factors are
looked up in the params DataFrame. By default, only one virus strain is
used.
vaccination_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary of models
which allow to vaccinate individuals. The ``"model"`` key holds a function
with arguments ``states``, ``params``, and a ``seed`` which returns boolean
indicators for individuals who received a vaccination.
rapid_test_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary of
dictionaries containing models for rapid tests. Each model for rapid tests
can have a ``"start"`` and ``"end"`` date. It must have a function under
``"model"`` which accepts ``states``, ``params``, ``receives_rapid_test``,
``"contacts"`` and ``seed`` and returns a boolean series indicating
individuals who received a rapid test. The difference to other test models
is that rapid tests are performed after planned contacts are calculated
(i.e. contact models and policies are evaluated) but before they actually
take place. This allows people to use more rapid tests on days with many
planned contacts and to react to the test outcome in
``rapid_test_reaction_models``.
rapid_test_reaction_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary
holding rapid tests reaction models which allow to change calculated
contacts based on the results of rapid tests. Each model can have a
``"start"`` and ``"end"`` date. It must have a function under ``"model"``
which accepts ``states``, ``params``, ``"contacts"`` and ``seed`` and
returns a modified copy of contacts.
seasonality_factor_model (Optional[Callable]): A model which takes in and
``params`` and ``dates`` signaling the whole duration of the simulation and
returns a DataFrame with a factor for each day and contact model which
scales the corresponding infection probability. If seasonality patterns are
the same for all contact models, the model can return a Series instead
of a DataFrame.
derived_state_variables (Optional[Dict[str, str]]): A dictionary that maps
names of state variables to pandas evaluation strings that generate derived
state variables, i.e. state variables that can be calculated from the
existing state variables.
period_outputs (Optional[Dict[str, Callable]]): A dictionary of functions
that are called with the states DataFrame at the end of each period. Their
results are stored in a dictionary of lists inside the results dictionary
of the simulate function.
return_time_series (Optional[bool]): Whether the full time series is stored
on disk and returned as dask.DataFrame in the results dictionary of the
simulate function.
return_last_states (Optional[bool]): Whether the full states DataFrame of the
last period is returned in the results dictionary of the simulate function.
Returns:
Callable: Simulates dataset based on parameters.
"""
startup_seed, simulation_seed = _generate_seeds(seed)
events = {} if events is None else events
contact_policies = {} if contact_policies is None else contact_policies
if (
testing_demand_models is None
or testing_allocation_models is None
or testing_processing_models is None
):
testing_demand_models = {}
testing_allocation_models = {}
testing_processing_models = {}
if rapid_test_reaction_models is None or rapid_test_models is None:
rapid_test_reaction_models = {}
if rapid_test_models is None:
rapid_test_models = {}
if vaccination_models is None:
vaccination_models = {}
if derived_state_variables is None:
derived_state_variables = {}
if period_outputs is None:
period_outputs = {}
if not any([period_outputs, return_time_series, return_last_states]):
raise ValueError("No simulation output was requested.")
initial_states = initial_states.copy(deep=True)
params = params.copy(deep=True)
validate_params(params)
validate_contact_models(contact_models)
validate_contact_policies(contact_policies, contact_models)
validate_testing_models(
testing_demand_models, testing_allocation_models, testing_processing_models
)
validate_vaccination_models(vaccination_models)
user_state_columns = initial_states.columns
path = _create_output_directory(path)
contact_models = _sort_contact_models(contact_models)
assort_bys = _process_assort_bys(contact_models)
duration = parse_duration(duration)
virus_strains = parse_virus_strains(virus_strains, params)
contact_policies = _add_default_duration_to_models(contact_policies, duration)
initial_conditions = parse_initial_conditions(
initial_conditions, duration["start"], virus_strains
)
# Testing and vaccination models are used in the initial conditions and should be
# activated during the burn-in phase if the starting date is not defined.
default_duration_w_burn_in = {
"start": initial_conditions["burn_in_periods"][0],
"end": duration["end"],
}
testing_demand_models = _add_default_duration_to_models(
testing_demand_models, default_duration_w_burn_in
)
testing_allocation_models = _add_default_duration_to_models(
testing_allocation_models, default_duration_w_burn_in
)
testing_processing_models = _add_default_duration_to_models(
testing_processing_models, default_duration_w_burn_in
)
vaccination_models = _add_default_duration_to_models(
vaccination_models, default_duration_w_burn_in
)
rapid_test_models = _add_default_duration_to_models(rapid_test_models, duration)
rapid_test_reaction_models = _add_default_duration_to_models(
rapid_test_reaction_models, duration
)
if _are_states_prepared(initial_states):
validate_prepared_initial_states(initial_states, duration)
else:
validate_initial_states(initial_states)
initial_states = _process_initial_states(
initial_states, assort_bys, virus_strains
)
initial_states = draw_course_of_disease(
initial_states, params, next(startup_seed)
)
initial_states = sample_initial_distribution_of_infections_and_immunity(
states=initial_states,
params=params,
initial_conditions=initial_conditions,
testing_demand_models=testing_demand_models,
testing_allocation_models=testing_allocation_models,
testing_processing_models=testing_processing_models,
virus_strains=virus_strains,
vaccination_models=vaccination_models,
seed=startup_seed,
derived_state_variables=derived_state_variables,
)
initial_states, group_codes_info = _create_group_codes_and_info(
initial_states, assort_bys, contact_models
)
indexers = _prepare_assortative_matching_indexers(
initial_states, contact_models, group_codes_info
)
cols_to_keep = _process_saved_columns(
saved_columns, user_state_columns, group_codes_info, contact_models
)
sim_func = functools.partial(
_simulate,
initial_states=initial_states,
assort_bys=assort_bys,
contact_models=contact_models,
group_codes_info=group_codes_info,
duration=duration,
events=events,
contact_policies=contact_policies,
testing_demand_models=testing_demand_models,
testing_allocation_models=testing_allocation_models,
testing_processing_models=testing_processing_models,
seed=simulation_seed,
path=path,
columns_to_keep=cols_to_keep,
indexers=indexers,
susceptibility_factor_model=susceptibility_factor_model,
virus_strains=virus_strains,
vaccination_models=vaccination_models,
rapid_test_models=rapid_test_models,
rapid_test_reaction_models=rapid_test_reaction_models,
seasonality_factor_model=seasonality_factor_model,
derived_state_variables=derived_state_variables,
period_outputs=period_outputs,
return_time_series=return_time_series,
return_last_states=return_last_states,
)
return sim_func
def _simulate(
params,
initial_states,
assort_bys,
contact_models,
group_codes_info,
duration,
events,
contact_policies,
testing_demand_models,
testing_allocation_models,
testing_processing_models,
seed,
path,
columns_to_keep,
indexers,
susceptibility_factor_model,
virus_strains,
vaccination_models,
rapid_test_models,
rapid_test_reaction_models,
seasonality_factor_model,
derived_state_variables,
period_outputs,
return_time_series,
return_last_states,
):
"""Simulate the spread of an infectious disease.
Args:
params (pandas.DataFrame): DataFrame with parameters that influence the number
of contacts, contagiousness and dangerousness of the disease, ... .
initial_states (pandas.DataFrame): See :ref:`states`. Cannot contain the column
"date" because it is used internally.
contact_models (dict): Dictionary of dictionaries where each dictionary
describes a channel by which contacts can be formed. See
:ref:`contact_models`.
duration (dict): Duration is a dictionary containing kwargs for
:func:`pandas.date_range`.
events (dict): Dictionary of events which cause infections.
contact_policies (dict): Dict of dicts with policies. See :ref:`policies`.
testing_demand_models (dict): Dict of dicts with demand models for tests. See
:ref:`testing_demand_models` for more information.
testing_allocation_models (dict): Dict of dicts with allocation models for
tests. See :ref:`testing_allocation_models` for more information.
testing_processing_models (dict): Dict of dicts with processing models for
tests. See :ref:`testing_processing_models` for more information.
seed (int, optional): The seed is used as the starting point for two seed
sequences where one is used to set up the simulation function and the other
seed sequence is used within the simulation and reset every parameter
evaluation. If you pass ``None`` as a seed, an internal seed is sampled to
set up the simulation function. The seed for the simulation is sampled at
the beginning of the simulation function and can be influenced by setting
:class:`numpy.random.seed` right before the call.
path (pathlib.Path): Path to the directory where the simulated data is stored.
columns_to_keep (list): Columns of states that will be saved in each period.
susceptibility_factor_model (Callable): A function which takes the
states and parameters and returns an infection probability multiplier for
each individual.
virus_strains (Dict[str, Any]): A dictionary with the keys ``"names"``,
``"contagiousness_factor"`` and ``"immunity_resistance_factor"`` holding the
different contagiousness factors and immunity resistance factors of multiple
viruses.
vaccination_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary of models
which allow to vaccinate individuals. The ``"model"`` key holds a function
with arguments ``states``, ``params``, and a ``seed`` which returns boolean
indicators for individuals who received a vaccination.
rapid_test_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary of
dictionaries containing models for rapid tests. Each model for rapid tests
can have a ``"start"`` and ``"end"`` date. It must have a function under
``"model"`` which accepts ``states``, ``params``, ``receives_rapid_test``
and ``seed`` and returns a boolean series indicating individuals who
received a rapid test. The difference to other test models is that rapid
tests are performed before contacts are calculated, so that people can
use rapid tests before they meet other people and so that normal tests can
re-test individuals with positive rapid tests.
rapid_test_reaction_models (Optional[Dict[str, Dict[str, Any]]]): A dictionary
holding rapid tests reaction models which allow to change calculated
contacts based on the results of rapid tests.
seasonality_factor_model (Optional[Callable]): A model which takes in and
``params`` and ``dates`` signaling the whole duration of the simulation and
returns a factor for each day which scales all infection probabilities.
derived_state_variables (Dict[str, str]): A dictionary that maps
names of state variables to pandas evaluation strings that generate derived
state variables, i.e. state variables that can be calculated from the
existing state variables.
period_outputs (Optional[Dict[str, Callable]]): A dictionary of functions
that are called with the states DataFrame at the end of each period. Their
results are stored in a dictionary of lists inside the results dictionary
of the simulate function.
return_time_series (Optional[bool]): Whether the full time series is stored
on disk and returned as dask.DataFrame in the results dictionary of the
simulate function. If False, only the additional outputs are available.
return_last_states (Optional[bool]): Whether the full states DataFrame of the
last period is returned in the results dictionary of the simulate function.
Returns:
result (Dict[str, Any]): The simulation result which include some or all of the
following keys, depending on the values of ``period_outputs``,
``return_time_series`` and ``return_last_states``.
- **time_series** (:class:`dask.dataframe`): The DataFrame contains the
states of each period (see :ref:`states`).
- **last_states** (:class:`dask.dataframe`): The states of the last
simulated period to resume the simulation.
- **period_outputs** (dict): Dictionary of lists. The keys are the keys
of the ``period_outputs`` dictionary passed to ``get_simulate_func``.
The values are lists with one entry per simulated period.
"""
seed = np.random.randint(0, 1_000_000) if seed is None else seed
seed = itertools.count(seed)
assortative_matching_cum_probs = (
_prepare_assortative_matching_cumulative_probabilities(
initial_states, assort_bys, params, contact_models, group_codes_info
)
)
virus_strains = prepare_virus_strain_factors(
virus_strains=virus_strains, params=params
)
susceptibility_factor = prepare_susceptibility_factor(
susceptibility_factor_model=susceptibility_factor_model,
initial_states=initial_states,
params=params,
seed=seed,
)
seasonality_factor = prepare_seasonality_factor(
seasonality_factor_model=seasonality_factor_model,
params=params,
dates=duration["dates"],
seed=seed,
contact_models=contact_models,
)
states = initial_states
if states.columns.isin(["date", "period"]).any():
logger.info("Resume the simulation...")
else:
logger.info("Start the simulation...")
pbar = tqdm(duration["dates"])
evaluated_period_outputs = {key: [] for key in period_outputs}
for date in pbar:
pbar.set_description(f"{date.date()}")
states["date"] = date
states["period"] = timestamp_to_sid_period(date)
contacts = calculate_contacts(
contact_models=contact_models,
states=states,
params=params,
seed=seed,
)
contacts = apply_contact_policies(
contact_policies=contact_policies,
contacts=contacts,
states=states,
date=date,
seed=seed,
)
states = perform_rapid_tests(
date=date,
states=states,
params=params,
rapid_test_models=rapid_test_models,
contacts=contacts,
seed=seed,
)
contacts = apply_reactions_to_rapid_tests(
date=date,
states=states,
params=params,
rapid_test_reaction_models=rapid_test_reaction_models,
contacts=contacts,
seed=seed,
)
recurrent_contacts, random_contacts = post_process_contacts(
contacts=contacts,
states=states,
contact_models=contact_models,
)
recurrent_contacts_np = (
None
if recurrent_contacts is None
else recurrent_contacts.to_numpy(copy=True)
)
random_contacts_np = (
None if random_contacts is None else random_contacts.to_numpy(copy=True)
)
(
newly_infected_contacts,
n_has_additionally_infected,
newly_missed_contacts,
channel_infected_by_contact,
) = calculate_infections_by_contacts(
states=states,
recurrent_contacts=recurrent_contacts_np,
random_contacts=random_contacts_np,
params=params,
indexers=indexers,
assortative_matching_cum_probs=assortative_matching_cum_probs,
contact_models=contact_models,
group_codes_info=group_codes_info,
susceptibility_factor=susceptibility_factor,
virus_strains=virus_strains,
seasonality_factor=seasonality_factor.loc[date],
seed=seed,
)
(
newly_infected_events,
channel_infected_by_event,
) = calculate_infections_by_events(states, params, events, virus_strains, seed)
states, channel_demands_test, to_be_processed_tests = perform_testing(
date=date,
states=states,
params=params,
testing_demand_models=testing_demand_models,
testing_allocation_models=testing_allocation_models,
testing_processing_models=testing_processing_models,
seed=seed,
columns_to_keep=columns_to_keep,
)
newly_vaccinated = vaccinate_individuals(
date, vaccination_models, states, params, seed
)
states = update_states(
states=states,
newly_infected_contacts=newly_infected_contacts,
newly_infected_events=newly_infected_events,
params=params,
virus_strains=virus_strains,
to_be_processed_tests=to_be_processed_tests,
newly_vaccinated=newly_vaccinated,
seed=seed,
derived_state_variables=derived_state_variables,
)
states = _add_additional_information_to_states(
states=states,
columns_to_keep=columns_to_keep,
n_has_additionally_infected=n_has_additionally_infected,
random_contacts=random_contacts,
recurrent_contacts=recurrent_contacts,
channel_infected_by_contact=channel_infected_by_contact,
channel_infected_by_event=channel_infected_by_event,
channel_demands_test=channel_demands_test,
susceptibility_factor=susceptibility_factor,
)
if return_time_series:
_dump_periodic_states(states, columns_to_keep, path, date)
if period_outputs:
for name, func in period_outputs.items():
evaluated_period_outputs[name].append(func(states))
results = {}
if return_time_series:
time_series = _prepare_time_series(path, columns_to_keep, states)
results["time_series"] = time_series
if return_last_states:
results["last_states"] = states
if period_outputs:
results["period_outputs"] = evaluated_period_outputs
return results
def _generate_seeds(seed: Optional[int]):
"""Generate seeds for startup and simulation.
We use the user-provided seed or a random seed to generate two other seeds. The
first seed is turned into a seed sequence and used to control randomness during
the preparation of the simulate function. The second seed is for the randomness in
the simulation, but stays an integer so that the seed sequence can be rebuilt every
iteration.
If the seed is ``None``, only the start-up seed is sampled and the seed for
simulation is set to ``None``. This seed will be sampled in :func:`_simulate` and
can be influenced by setting ``np.random.seed(seed)`` right before the call.
Args:
seed (Optional[int]): The seed provided by the user.
Returns:
out (tuple): A tuple containing
- **startup_seed** (:class:`itertools.count`): The seed sequence for the
startup.
- **simulation_seed** (:class:`int`): The starting point for the seed sequence
in the simulation.
"""
internal_seed = np.random.randint(0, 1_000_000) if seed is None else seed
np.random.seed(internal_seed)
startup_seed = itertools.count(np.random.randint(0, 10_000))
simulation_seed = (
np.random.randint(100_000, 1_000_000) if seed is not None else None
)
return startup_seed, simulation_seed
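# Illustrative only (not part of sid): both return values are seed sources. The
# startup seed is an itertools.count, so set-up helpers draw fresh integers via
# next(startup_seed); the simulation seed stays a plain int (or None) and is
# wrapped in a new itertools.count inside _simulate, so every call of the
# simulate function restarts from the same sequence, e.g.
#   startup_seed, simulation_seed = _generate_seeds(42)
#   next(startup_seed)  # -> an int consumed by e.g. draw_course_of_disease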
def _create_output_directory(path: Union[str, Path, None]) -> Path:
"""Determine the output directory for the data.
The user can provide a path or a default path is chosen. If the user's path leads to
a non-empty directory, it is removed and newly created.
Args:
path (Union[str, Path, None]): Path to the output directory.
Returns:
output_directory (pathlib.Path): Path to the created output directory.
"""
if path is None:
path = Path.cwd() / ".sid"
output_directory = Path(path)
if output_directory.exists() and not output_directory.is_dir():
raise ValueError(f"{path} is a file instead of an directory.")
elif output_directory.exists():
shutil.rmtree(output_directory)
for directory in [
output_directory,
output_directory / "last_states",
output_directory / "time_series",
]:
directory.mkdir(parents=True, exist_ok=True)
return output_directory
def _sort_contact_models(contact_models: Dict[str, Any]) -> Dict[str, Any]:
"""Sort the contact_models.
First come the non-recurrent, then the recurrent contact models. Within each group
the models are sorted alphabetically.
Args:
contact_models (Dict[str, Any]): See :ref:`contact_models`
Returns:
Dict[str, Any]: Sorted copy of contact_models.
"""
sorted_ = sorted(
name for name, mod in contact_models.items() if not mod["is_recurrent"]
)
sorted_ += sorted(
name for name, mod in contact_models.items() if mod["is_recurrent"]
)
return {name: contact_models[name] for name in sorted_}
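# Illustrative only (made-up models): random (non-recurrent) models come first,
# then recurrent ones, alphabetically within each group, e.g.
#   _sort_contact_models({
#       "b_households": {"is_recurrent": True},
#       "a_work": {"is_recurrent": False},
#       "c_school": {"is_recurrent": True},
#   })
#   # -> keys ordered as ["a_work", "b_households", "c_school"]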
def _process_assort_bys(contact_models: Dict[str, Any]) -> Dict[str, List[str]]:
"""Set default values for assort_by variables and extract them into a dict.
Args:
contact_models (Dict[str, Any]): see :ref:`contact_models`
Returns:
assort_bys (Dict[str, List[str]]): Keys are names of contact models, values are
lists with the assort_by variables of the model.
"""
assort_bys = {}
for model_name, model in contact_models.items():
assort_by = model.get("assort_by", None)
if assort_by is None:
warnings.warn(
"Not specifying 'assort_by' significantly raises runtime. "
"You can silence this warning by setting 'assort_by' to False."
f"in contact model {model_name}"
)
assort_by = []
elif not assort_by:
assort_by = []
elif isinstance(assort_by, str):
assort_by = [assort_by]
elif isinstance(assort_by, list):
pass
else:
raise ValueError(
f"'assort_by' for '{model_name}' must one of False, str, or list."
)
assort_bys[model_name] = assort_by
return assort_bys
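# Illustrative only (made-up models): a string becomes a one-element list, a list
# passes through, False becomes [], and a missing 'assort_by' entry triggers the
# warning above and also becomes [], e.g.
#   _process_assort_bys({
#       "meetings": {"assort_by": "region"},
#       "work": {"assort_by": ["age_group", "region"]},
#       "households": {"assort_by": False},
#   })
#   # -> {"meetings": ["region"], "work": ["age_group", "region"], "households": []}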
def _create_group_codes_names(
contact_models: Dict[str, Any], assort_bys: Dict[str, List[str]]
) -> Dict[str, str]:
"""Create a name for each contact models group codes.
The group codes are either found in the initial states or are a factorization of one
or multiple variables in the initial states.
``"is_factorized"`` can be set in contact models to indicate that the assortative
variable is already factorized which saves memory.
"""
group_codes_names = {}
for name, model in contact_models.items():
is_factorized = model.get("is_factorized", False)
n_assort_bys = len(assort_bys[name])
if is_factorized and n_assort_bys != 1:
raise ValueError(
f"'is_factorized' is 'True' for contact model {name}, but there is not "
f"one assortative variable, but {n_assort_bys}."
)
elif is_factorized:
group_codes_names[name] = assort_bys[name][0]
else:
group_codes_names[name] = f"group_codes_{name}"
return group_codes_names
def _prepare_assortative_matching_indexers(
states: pd.DataFrame,
contact_models: Dict[str, Dict[str, Any]],
group_codes_info: Dict[str, Dict[str, Any]],
) -> Dict[str, nb.typed.List]:
"""Create indexers for matching individuals within contact models.
For each contact model, :func:`create_group_indexer` returns a Numba list where each
position contains a :class:`numpy.ndarray` with all the indices of individuals
belonging to the same group given by the index.
The indexer has one Numba list for recurrent and random models. Each list has one
entry per contact model which holds the result of :func:`create_group_indexer`.
Args:
states (pandas.DataFrame): see :ref:`states`.
contact_models (Dict[str, Dict[str, Any]]): The contact models.
group_codes_info (Dict[str, Dict[str, Any]]): A dictionary where keys are names
of contact models and values are dictionaries containing the name and the
original codes of the assortative variables.
Returns:
indexers (Dict[str, numba.typed.List]): The indexer is a dictionary with one
entry for recurrent and random contact models. The values are Numba lists
containing Numba lists for each contact model. Each list holds indices for
each group in the contact model.
"""
recurrent_models, random_models = separate_contact_model_names(contact_models)
indexers = {"recurrent": nb.typed.List(), "random": nb.typed.List()}
for cm in recurrent_models:
indexer = create_group_indexer(states, group_codes_info[cm]["name"])
indexers["recurrent"].append(indexer)
for cm in random_models:
indexer = create_group_indexer(states, group_codes_info[cm]["name"])
indexers["random"].append(indexer)
return indexers
def _prepare_assortative_matching_cumulative_probabilities(
states: pd.DataFrame,
assort_bys: Dict[str, List[str]],
params: pd.DataFrame,
contact_models: Dict[str, Dict[str, Any]],
group_codes_info: Dict[str, Dict[str, Any]],
) -> nb.typed.List:
"""Create first stage probabilities for assortative matching with random contacts.
Args:
states (pandas.DataFrame): See :ref:`states`.
assort_bys (Dict[str, List[str]]): Keys are names of contact models, values are
lists with the assort_by variables of the model.
params (pandas.DataFrame): See :ref:`params`.
contact_models (dict): see :ref:`contact_models`.
group_codes_info (Dict[str, Dict[str, Any]]): A dictionary where keys are names
of contact models and values are dictionaries containing the name and the
original codes of the assortative variables.
Returns:
probabilities (numba.typed.List): The list contains one entry for each random
contact model. Each entry holds a ``n_groups * n_groups`` transition matrix
where ``probs[i, j]`` is the cumulative probability that an individual from
group ``i`` meets someone from group ``j``.
"""
probabilities = nb.typed.List()
for model_name, assort_by in assort_bys.items():
if not contact_models[model_name]["is_recurrent"]:
probs = create_cumulative_group_transition_probabilities(
states,
assort_by,
params,
model_name,
group_codes_info[model_name]["groups"],
)
probabilities.append(probs)
# The nopython mode fails while calculating infections, if we leave the list empty
# or put a 1d array inside the list.
if len(probabilities) == 0:
probabilities.append(np.zeros((0, 0)))
return probabilities
def _add_default_duration_to_models(
dictionaries: Dict[str, Dict[str, Any]], duration: Dict[str, Any]
) -> Dict[str, Dict[str, Any]]:
"""Add default durations to models."""
for name, model in dictionaries.items():
start = pd.Timestamp(model.get("start", duration["start"]))
end = pd.Timestamp(model.get("end", duration["end"]))
m = "The {} date of model '{}' could not be converted to a valid pd.Timestamp."
if pd.isna(start):
raise ValueError(m.format("start", name))
if | pd.isna(end) | pandas.isna |
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: DataFrame
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected, actual, check_names=check_names, check_like=check_like
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
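# Illustrative addition (not one of the original tests): a minimal usage sketch of
# check_round_trip with the pyarrow engine, ignoring column order on comparison.
# The test name is hypothetical.
def test_round_trip_minimal_sketch(pa):
    df = pd.DataFrame({"a": [1, 2, 3], "b": list("xyz")})
    check_round_trip(df, pa, check_like=True)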
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
pytest.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
            pd.Timestamp("20130101"),
import matplotlib.pyplot as plt
import pandas as pd
dataset_file = "datasets/stock_data.csv"
def load_data():
data = pd.read_csv(dataset_file)
# print(data)
return data
df = load_data()
plt.figure(figsize=(10, 5))
top = plt.subplot2grid((4, 4), (0, 0), rowspan=3, colspan=4)
bottom = plt.subplot2grid((4, 4), (3, 0), rowspan=1, colspan=4)
top.plot(df.index, df['Adj Close'])
bottom.bar(df.index, df['Volume'])
# set the labels
top.axes.get_xaxis().set_visible(False)
top.set_title('Apple')
top.set_ylabel('Adj Closing Price')
bottom.set_ylabel('Volume')
# simple moving averages
sma10 = df['Close'].rolling(10).mean() # 10 days
sma20 = df['Close'].rolling(20).mean() # 20 days
sma50 = df['Close'].rolling(50).mean() # 50 days
sma = pd.DataFrame({'AAPL': df['Close'], 'SMA 10': sma10, 'SMA 20': sma20, 'SMA 50': sma50})
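# Hedged continuation sketch: overlay the close price and its moving averages on one
# chart. Column names follow the DataFrame built just above; the figure size is arbitrary.
sma.plot(figsize=(10, 5), title='AAPL close vs. simple moving averages')
plt.show()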
import argparse
import pandas as pd
import re
from pathlib import Path
import torch
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('fmt', default=None, nargs='?')
args = parser.parse_args()
res = {}
root_dir = Path(args.dir)
train_log = root_dir / 'train.log'
config = torch.load(root_dir / 'run_config.pth')
pretrained = config.get('pretrained', None)
# logs
augment = config.get('transforms', [])
label_type = config.get('label_type', 'soft')
model = config.get('model','CRNN')
def get_seg_metrics(line, pointer, seg_type='Segment'):
res = {}
while not 'macro-average' in line:
line = next(pointer).strip()
while not 'F-measure (F1)' in line:
line = next(pointer).strip()
res[f'F1'] = float(line.split()[-2])
while not 'Precision' in line:
line = next(pointer).strip()
res[f'Precision'] = float(line.split()[-2])
while not 'Recall' in line:
line = next(pointer).strip()
res[f'Recall'] = float(line.split()[-2])
return res
def parse_eval_file(eval_file):
res = {}
frame_results = {}
with open(eval_file, 'r') as rp:
for line in rp:
line = line.strip()
if 'AUC' in line:
auc = line.split()[-1]
frame_results['AUC'] = float(auc)
if 'FER' in line:
fer = line.split()[-1]
frame_results['FER'] = float(fer)
if 'VAD macro' in line:
f1, pre, rec = re.findall(r"[-+]?\d*\.\d+|\d+",
line)[1:] # First hit is F1
frame_results['F1'] = float(f1)
frame_results['Precision'] = float(pre)
frame_results['Recall'] = float(rec)
if "Segment based metrics" in line:
res['Segment'] = get_seg_metrics(line, rp)
if 'Event based metrics' in line:
res['Event'] = get_seg_metrics(line, rp, 'Event')
res['Frame'] = frame_results
return res
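# Illustrative sketch (made-up numbers) of the nested dict parse_eval_file returns,
# which is what gets flattened into a DataFrame in the loop below:
# {'Segment': {'F1': 80.1, 'Precision': 79.0, 'Recall': 81.3},
#  'Event':   {'F1': 60.2, 'Precision': 58.7, 'Recall': 61.9},
#  'Frame':   {'AUC': 0.91, 'FER': 12.3, 'F1': 88.1, 'Precision': 87.0, 'Recall': 89.2}}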
all_results = []
for f in root_dir.glob('*.txt'):
eval_dataset = str(f.stem)[11:]
res = parse_eval_file(f)
    df = pd.DataFrame(res)
import pandas as pd
import numpy as np
import sklearn.neighbors
import scipy.sparse as sp
import seaborn as sns
import matplotlib.pyplot as plt
import torch
from torch_geometric.data import Data
def Transfer_pytorch_Data(adata):
G_df = adata.uns['Spatial_Net'].copy()
cells = np.array(adata.obs_names)
cells_id_tran = dict(zip(cells, range(cells.shape[0])))
G_df['Cell1'] = G_df['Cell1'].map(cells_id_tran)
G_df['Cell2'] = G_df['Cell2'].map(cells_id_tran)
G = sp.coo_matrix((np.ones(G_df.shape[0]), (G_df['Cell1'], G_df['Cell2'])), shape=(adata.n_obs, adata.n_obs))
G = G + sp.eye(G.shape[0])
edgeList = np.nonzero(G)
if type(adata.X) == np.ndarray:
data = Data(edge_index=torch.LongTensor(np.array(
[edgeList[0], edgeList[1]])), x=torch.FloatTensor(adata.X)) # .todense()
else:
data = Data(edge_index=torch.LongTensor(np.array(
[edgeList[0], edgeList[1]])), x=torch.FloatTensor(adata.X.todense())) # .todense()
return data
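# Minimal usage sketch (assumes `adata` is an AnnData object whose .uns['Spatial_Net']
# has already been filled in by Cal_Spatial_Net below):
# data = Transfer_pytorch_Data(adata)
# print(data.x.shape, data.edge_index.shape)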
def Batch_Data(adata, num_batch_x, num_batch_y, spatial_key=['X', 'Y'], plot_Stats=False):
Sp_df = adata.obs.loc[:, spatial_key].copy()
Sp_df = np.array(Sp_df)
batch_x_coor = [np.percentile(Sp_df[:, 0], (1/num_batch_x)*x*100) for x in range(num_batch_x+1)]
batch_y_coor = [np.percentile(Sp_df[:, 1], (1/num_batch_y)*x*100) for x in range(num_batch_y+1)]
Batch_list = []
for it_x in range(num_batch_x):
for it_y in range(num_batch_y):
min_x = batch_x_coor[it_x]
max_x = batch_x_coor[it_x+1]
min_y = batch_y_coor[it_y]
max_y = batch_y_coor[it_y+1]
temp_adata = adata.copy()
temp_adata = temp_adata[temp_adata.obs[spatial_key[0]].map(lambda x: min_x <= x <= max_x)]
temp_adata = temp_adata[temp_adata.obs[spatial_key[1]].map(lambda y: min_y <= y <= max_y)]
Batch_list.append(temp_adata)
if plot_Stats:
f, ax = plt.subplots(figsize=(1, 3))
plot_df = pd.DataFrame([x.shape[0] for x in Batch_list], columns=['#spot/batch'])
sns.boxplot(y='#spot/batch', data=plot_df, ax=ax)
sns.stripplot(y='#spot/batch', data=plot_df, ax=ax, color='red', size=5)
return Batch_list
def Cal_Spatial_Net(adata, rad_cutoff=None, k_cutoff=None, model='Radius', verbose=True):
"""\
Construct the spatial neighbor networks.
Parameters
----------
adata
AnnData object of scanpy package.
rad_cutoff
radius cutoff when model='Radius'
k_cutoff
The number of nearest neighbors when model='KNN'
model
The network construction model. When model=='Radius', the spot is connected to spots whose distance is less than rad_cutoff. When model=='KNN', the spot is connected to its first k_cutoff nearest neighbors.
Returns
-------
The spatial networks are saved in adata.uns['Spatial_Net']
"""
assert(model in ['Radius', 'KNN'])
if verbose:
print('------Calculating spatial graph...')
coor = pd.DataFrame(adata.obsm['spatial'])
coor.index = adata.obs.index
coor.columns = ['imagerow', 'imagecol']
if model == 'Radius':
nbrs = sklearn.neighbors.NearestNeighbors(radius=rad_cutoff).fit(coor)
distances, indices = nbrs.radius_neighbors(coor, return_distance=True)
KNN_list = []
for it in range(indices.shape[0]):
KNN_list.append(pd.DataFrame(zip([it]*indices[it].shape[0], indices[it], distances[it])))
if model == 'KNN':
nbrs = sklearn.neighbors.NearestNeighbors(n_neighbors=k_cutoff+1).fit(coor)
distances, indices = nbrs.kneighbors(coor)
KNN_list = []
for it in range(indices.shape[0]):
KNN_list.append(pd.DataFrame(zip([it]*indices.shape[1],indices[it,:], distances[it,:])))
    KNN_df = pd.concat(KNN_list)
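    # Hedged continuation sketch (mirrors the reference STAGATE implementation this
    # function appears to follow): name the columns, drop zero-distance self pairs,
    # map integer ids back to cell names and store the net where Transfer_pytorch_Data expects it.
    KNN_df.columns = ['Cell1', 'Cell2', 'Distance']
    Spatial_Net = KNN_df.loc[KNN_df['Distance'] > 0, :].copy()
    id_cell_trans = dict(zip(range(coor.shape[0]), np.array(coor.index)))
    Spatial_Net['Cell1'] = Spatial_Net['Cell1'].map(id_cell_trans)
    Spatial_Net['Cell2'] = Spatial_Net['Cell2'].map(id_cell_trans)
    if verbose:
        print('The graph contains %d edges, %d cells.' % (Spatial_Net.shape[0], adata.n_obs))
    adata.uns['Spatial_Net'] = Spatial_Net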
#Compare painted data with observed data - for three different sets of ages
#Works but could do with some tidying up of the code
import numpy as np
import h5py
import pandas as pd
import math
from astropy.io import fits
from astropy.table import Table, join
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
import matplotlib.colors as colors
import sys
sys.path.append('./scripts/')
from chemevo import *
#fl = chem_evo_data('./comparison.hdf5')
#fl = chem_evo_data('./output.hdf5')
fl = chem_evo_data('./KSsfr.hdf5')
hdf5_file = '/data/ktfm2/apogee_data/gaia_spectro.hdf5'
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits'
data_file_2 = '/data/jls/apokasc_astroNN.fits'
hdf = h5py.File(hdf5_file, "r")
dataset = hdf['data']
log_age_data = dataset["log10_age"]
ID_data = dataset["APOGEE_ID"]
SD_table = Table([ID_data, log_age_data], names=('apogee_id','log_age_data'))
hdu_list_1 = fits.open(data_file_1, memmap=True)
apogee_data = Table(hdu_list_1[1].data)
hdu_list_1.close()
hdu_list_2 = fits.open(data_file_2, memmap=True)
apokasc_data = Table(hdu_list_2[1].data)
hdu_list_2.close()
#print(apokasc_data.colnames)
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
#Join the APOGEE data table and table from Sanders & Das - will have less rows
#Comment out if not using S&D ages
#full_table = join(apogee_data, SD_table)
#apogee_data = full_table
#Use APOKASC data, comment out if not using
#apogee_data = apokasc_data
#===================================================================================================================
#Radial Migration filter
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['LogAge']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(~pd.isna(apogee_data['MG_H_ERR']))&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
#fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['log_age_data']))&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
fltr = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))&(apogee_data['age_lowess_correct']>0.0)&(~pd.isna(apogee_data['FE_H']))&(~pd.isna(apogee_data['MG_H']))&(~pd.isna(apogee_data['LOGG']))&(~pd.isna(apogee_data['FE_H_ERR']))&(apogee_data['LOGG']<3.5)&(outs(apogee_data['GALZ'],-1.0,1.0))&(betw(apogee_data['GALZ'],-5.0,5.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
lower_fltr = (~pd.isna(apogee_data['rl']))
from string import ascii_letters
import struct
from uuid import uuid4
from datashape import var, R, Option, dshape
import numpy as np
from odo import resource, odo
import pandas as pd
import pytest
import sqlalchemy as sa
from warp_prism._warp_prism import (
postgres_signature,
raw_to_arrays,
test_overflow_operations as _test_overflow_operations,
)
from warp_prism import (
to_arrays,
to_dataframe,
null_values as null_values_for_type,
_typeid_map,
)
from warp_prism.tests import tmp_db_uri as tmp_db_uri_ctx
@pytest.fixture(scope='module')
def tmp_db_uri():
with tmp_db_uri_ctx() as db_uri:
yield db_uri
@pytest.fixture
def tmp_table_uri(tmp_db_uri):
return '%s::%s%s' % (tmp_db_uri, 'table_', uuid4().hex)
def check_roundtrip_nonnull(table_uri, data, dtype, sqltype):
"""Check the data roundtrip through postgres using warp_prism to read the
data
Parameters
----------
table_uri : str
The uri to a unique table.
data : np.array
The input data.
dtype : str
The dtype of the data.
sqltype : type
The sqlalchemy type of the data.
"""
    input_dataframe = pd.DataFrame({'a': data})
# @Time : 4/7/2022 11:15 AM
# @Author : <NAME>
"""
This script performs the data post-processing, before feeding it into any machine learning algorithm
"""
import os
import numpy as np
from numpy import genfromtxt
import math
import statistics as st
import matplotlib.pyplot as plt
from statistics import mean, stdev
import pandas as pd
from tqdm import tqdm
import csv
import shutil
import random
# trial 2
def check_size(source):
lowest = 10000
highest = 0
sizes = []
for filename in tqdm(os.listdir(source)):
data = pd.read_csv(source + filename)
n_samples = data.shape[0]
sizes.append(n_samples)
if n_samples < lowest:
lowest = n_samples
if n_samples > highest:
highest = n_samples
title = "Lowest= " + str(lowest) + " / Highest= " + str(highest) + " / Mean=" + str(round(mean(sizes),2)) + " / SD= " + str(round(stdev(sizes),2))
plt.title(title)
plt.boxplot(sizes)
plt.show()
return lowest, highest
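# Minimal usage sketch (hypothetical folder): report the row-count spread of every
# recorded csv and show it as a box plot.
# shortest, longest = check_size('data/downsampled/')
# print(f'series length ranges from {shortest} to {longest} samples')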
def down_sample(period, source, target):
"""
Downsamples all the csv files located in source folder, and saves the new csv in target folder
:param period: period [ms] at which you want to sample the time series
:param source: subfolder with original data
:param target: subfolder to save the downsampled data
:return:
"""
for filename in os.listdir(source):
# print(filename)
# --- Step 0: Read csv data into a a Pandas Dataframe ---
# Do not include the first column that has the time, so we don't overfit the next processes
# data = genfromtxt((source + filename), delimiter=',', skip_header=True)
data = pd.read_csv(source + filename)
n_samples = data.shape[0] # rows
n_channels = data.shape[1] # columns
max_time = data.iloc[-1, 0]
# Create New Dataframe
downsampled_data = pd.DataFrame()
headers = pd.read_csv(source + filename, index_col=0, nrows=0).columns.tolist()
# print(headers)
for i in range(n_channels):
new_value = []
if i == 0:
# --- Time Channel
new_time = []
time = data.iloc[0, 0]
while time < max_time:
new_time.append(time)
time = time + period/1000
# print(time)
header = "Time"
downsampled_data[header] = new_time
else:
# --- The rest of the channels
new_value = []
index = 0
for x in new_time:
for k in data.iloc[index:, 0]:
if k > x:
break
else:
index += 1
# Interpolation
x1 = data.iloc[index-1, 0]
x2 = data.iloc[index, 0]
y1 = data.iloc[index-1, i]
y2 = data.iloc[index, i]
value = (y1 - y2)*(x2 - x)/(x2 - x1) + y2
new_value.append(value)
header = headers[i-1]
downsampled_data[header] = new_value
# --- Compare PLots ---
# plt.plot(data.iloc[:, 0], data.iloc[:, i])
# plt.plot(new_time, new_value)
# plt.show()
# print(downsampled_data)
downsampled_data.to_csv(target + filename, index=False)
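# Minimal usage sketch (hypothetical directories): resample every csv in 'data/raw/'
# onto a 20 ms grid and write the results to 'data/downsampled/'.
# down_sample(period=20, source='data/raw/', target='data/downsampled/')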
def join_csv(name, case, source, target):
"""
Joins csv from different topics but from the same experiment, into a single csv.
Thus, data is easier to handle, and less prone to make mistakes.
It does some cropping of the initial or last points, in order to have all the topics have the same size
:param name: Name of the dataset / experiment
:param case: Whether Grasp or Pick stage
:param source:
:param target:
:return:
"""
if case == 'GRASP/':
stage = 'grasp'
elif case == 'PICK/':
stage = 'pick'
# --- Step 1: Open all the topics from the same experiment that need to be joined ---
location = source
topics = ['_wrench', '_f1_imu', '_f1_states', '_f2_imu', '_f2_states', '_f3_imu', '_f3_states']
    data_0 = pd.read_csv(location + name + stage + topics[0] + '.csv', header=None, index_col=False)
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
程序通用函数库
作者:wking [http://wkings.net]
"""
import os
import statistics
import time
import datetime
import requests
import numpy as np
import pandas as pd
import threading
from queue import Queue
from retry import retry
# from rich.progress import track
# from rich import print
from tqdm import tqdm
import user_config as ucfg
# Debug print helper
def user_debug(print_str, print_value='', ):
    """First argument is the variable name, second argument is its value."""
if ucfg.debug:
if print_value:
print(str(print_str) + ' = ' + str(print_value))
else:
print(str(print_str))
# Convert a TDX daily-bar (.day) file to CSV. TDX data files store one record per 32 bytes.
def day2csv(source_dir, file_name, target_dir):
    """
    Convert a TDX daily-bar file to CSV. TDX data files store one record per 32 bytes.
    :param source_dir: str, source file directory
    :param file_name: str, file name
    :param target_dir: str, directory to save the output
    :return: none
    """
from struct import unpack
from decimal import Decimal # 用于浮点数四舍五入
# 以二进制方式打开源文件
source_path = source_dir + os.sep + file_name # 源文件包含文件名的路径
source_file = open(source_path, 'rb')
buf = source_file.read() # 读取源文件保存在变量中
source_file.close()
source_size = os.path.getsize(source_path) # 获取源文件大小
source_row_number = int(source_size / 32)
# user_debug('源文件行数', source_row_number)
# 打开目标文件,后缀名为CSV
target_path = target_dir + os.sep + file_name[2:-4] + '.csv' # 目标文件包含文件名的路径
# user_debug('target_path', target_path)
if not os.path.isfile(target_path):
# 目标文件不存在。写入表头行。begin从0开始转换
target_file = open(target_path, 'w', encoding="utf-8") # 以覆盖写模式打开文件
header = str('date') + ',' + str('code') + ',' + str('open') + ',' + str('high') + ',' + str('low') + ',' \
+ str('close') + ',' + str('vol') + ',' + str('amount')
target_file.write(header)
begin = 0
end = begin + 32
row_number = 0
else:
# 不为0,文件有内容。行附加。
# 通达信数据32字节为一组,因此通达信文件大小除以32可算出通达信文件有多少行(也就是多少天)的数据。
# 再用readlines计算出目标文件已有多少行(目标文件多了首行标题行),(行数-1)*32 即begin要开始的字节位置
target_file = open(target_path, 'a+', encoding="gbk") # 以追加读写模式打开文件
# target_size = os.path.getsize(target_path) #获取目标文件大小
# 由于追加读写模式载入文件后指针在文件的结尾,需要先把指针改到文件开头,读取文件行数。
user_debug('当前指针', target_file.tell())
target_file.seek(0, 0) # 文件指针移到文件头
user_debug('移动指针到开头', target_file.seek(0, 0))
target_file_content = target_file.readlines() # 逐行读取文件内容
row_number = len(target_file_content) # 获得文件行数
user_debug('目标文件行数', row_number)
user_debug('目标文件最后一行的数据', target_file_content[-1])
target_file.seek(0, 2) # 文件指针移到文件尾
user_debug('移动指针到末尾', target_file.seek(0, 2))
if row_number > source_row_number:
user_debug('已是最新数据,跳过for循环')
else:
user_debug('追加模式,从' + str(row_number + 1) + '行开始')
if row_number == 0: # 如果文件出错是0的特殊情况
begin = 0
else:
row_number = row_number - 1 # 由于pandas的dataFrame格式索引从0开始,为下面for循环需要减1
begin = row_number * 32
end = begin + 32
for i in range(row_number, source_row_number):
# 由于pandas的dataFrame格式首行为标题行,第二行的索引从0开始,
# 因此转换出来显示的行数比原本少一行,但实际数据一致
#
# 将字节流转换成Python数据格式
# I: unsigned int
# f: float
# a[5]浮点类型的成交金额,使用decimal类四舍五入为整数
a = unpack('IIIIIfII', buf[begin:end])
# '\n' + str(i) + ','
# a[0] 将’19910404'样式的字符串转为'1991-05-05'格式的字符串。为了统一日期格式
a_date = str(a[0])[0:4] + '-' + str(a[0])[4:6] + '-' + str(a[0])[6:8]
file_name[2:-4]
line = '\n' + str(a_date) + ',' \
+ file_name[2:-4] + ',' \
+ str(a[1] / 100.0) + ',' \
+ str(a[2] / 100.0) + ',' \
+ str(a[3] / 100.0) + ',' \
+ str(a[4] / 100.0) + ',' \
+ str(a[6]) + ',' \
+ str(Decimal(a[5]).quantize(Decimal("1."), rounding="ROUND_HALF_UP"))
target_file.write(line)
begin += 32
end += 32
target_file.close()
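# Minimal usage sketch (hypothetical paths): convert one TDX .day file into a csv
# alongside the other converted files.
# day2csv(source_dir=r'C:\tdx\vipdoc\sh\lday', file_name='sh600000.day', target_dir=ucfg.tdx['csv_lday'])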
def get_TDX_blockfilecontent(filename):
"""
读取本机通达信板块文件,获取文件内容
:rtype: object
:param filename: 字符串类型。输入的文件名。
:return: DataFrame类型
"""
from pytdx.reader import block_reader, TdxFileNotFoundException
if ucfg.tdx['tdx_path']:
filepath = ucfg.tdx['tdx_path'] + os.sep + 'T0002' + os.sep + 'hq_cache' + os.sep + filename
df = block_reader.BlockReader().get_df(filepath)
else:
print("user_config文件的tdx_path变量未配置,或未找到" + filename + "文件")
return df
def get_lastest_stocklist():
"""
使用pytdx从网络获取最新券商列表
:return:DF格式,股票清单
"""
import pytdx.hq
import pytdx.util.best_ip
print(f"优选通达信行情服务器 也可直接更改为优选好的 {{'ip': '172.16.58.3', 'port': 7709}}")
# ipinfo = pytdx.util.best_ip.select_best_ip()
api = pytdx.hq.TdxHq_API()
# with api.connect(ipinfo['ip'], ipinfo['port']):
with api.connect('172.16.58.3', 7709):
data = pd.concat([pd.concat(
[api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j in range(2)], axis=0)
data = data.reindex(columns=['sse', 'code', 'name', 'pre_close', 'volunit', 'decimal_point'])
data.sort_values(by=['sse', 'code'], ascending=True, inplace=True)
data.reset_index(drop=True, inplace=True)
# 这个方法不行 字符串不能运算大于小于,转成int更麻烦
# df = data.loc[((data['sse'] == 'sh') & ((data['code'] >= '600000') | (data['code'] < '700000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '000001') | (data['code'] < '100000'))) | \
# ((data['sse'] == 'sz') & ((data['code'] >= '300000') | (data['code'] < '309999')))]
sh_start_num = data[(data['sse'] == 'sh') & (data['code'] == '600000')].index.tolist()[0]
sh_end_num = data[(data['sse'] == 'sh') & (data['code'] == '706070')].index.tolist()[0]
sz00_start_num = data[(data['sse'] == 'sz') & (data['code'] == '000001')].index.tolist()[0]
sz00_end_num = data[(data['sse'] == 'sz') & (data['code'] == '100303')].index.tolist()[0]
sz30_start_num = data[(data['sse'] == 'sz') & (data['code'] == '300001')].index.tolist()[0]
sz30_end_num = data[(data['sse'] == 'sz') & (data['code'] == '395001')].index.tolist()[0]
df_sh = data.iloc[sh_start_num:sh_end_num]
df_sz00 = data.iloc[sz00_start_num:sz00_end_num]
df_sz30 = data.iloc[sz30_start_num:sz30_end_num]
df = pd.concat([df_sh, df_sz00, df_sz30])
df.reset_index(drop=True, inplace=True)
return df
def historyfinancialreader(filepath):
"""
读取解析通达信目录的历史财务数据
:param filepath: 字符串类型。传入文件路径
:return: DataFrame格式。返回解析出的财务文件内容
"""
import struct
cw_file = open(filepath, 'rb')
header_pack_format = '<1hI1H3L'
header_size = struct.calcsize(header_pack_format)
stock_item_size = struct.calcsize("<6s1c1L")
data_header = cw_file.read(header_size)
stock_header = struct.unpack(header_pack_format, data_header)
max_count = stock_header[2]
report_date = stock_header[1]
report_size = stock_header[4]
report_fields_count = int(report_size / 4)
report_pack_format = '<{}f'.format(report_fields_count)
results = []
for stock_idx in range(0, max_count):
cw_file.seek(header_size + stock_idx * struct.calcsize("<6s1c1L"))
si = cw_file.read(stock_item_size)
stock_item = struct.unpack("<6s1c1L", si)
code = stock_item[0].decode("utf-8")
foa = stock_item[2]
cw_file.seek(foa)
info_data = cw_file.read(struct.calcsize(report_pack_format))
data_size = len(info_data)
cw_info = list(struct.unpack(report_pack_format, info_data))
cw_info.insert(0, code)
results.append(cw_info)
df = pd.DataFrame(results)
return df
class ManyThreadDownload:
def __init__(self, num=10):
self.num = num # 线程数,默认10
self.url = '' # url
self.name = '' # 目标地址
self.total = 0 # 文件大小
# 获取每个线程下载的区间
def get_range(self):
ranges = []
offset = int(self.total / self.num)
for i in range(self.num):
if i == self.num - 1:
ranges.append((i * offset, ''))
else:
ranges.append(((i * offset), (i + 1) * offset - 1))
return ranges # [(0,99),(100,199),(200,"")]
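    # Worked example (illustrative): with total=1000 and num=4 the offset is 250 and
    # get_range() returns [(0, 249), (250, 499), (500, 749), (750, '')] - the last
    # chunk is left open-ended so the final thread downloads any remaining bytes.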
# 通过传入开始和结束位置来下载文件
def download(self, ts_queue):
while not ts_queue.empty():
start_, end_ = ts_queue.get()
headers = {
'Range': 'Bytes=%s-%s' % (start_, end_),
'Accept-Encoding': '*'
}
flag = False
while not flag:
try:
# 设置重连次数
requests.adapters.DEFAULT_RETRIES = 10
# s = requests.session() # 每次都会发起一次TCP握手,性能降低,还可能因发起多个连接而被拒绝
# # 设置连接活跃状态为False
# s.keep_alive = False
# 默认stream=false,立即下载放到内存,文件过大会内存不足,大文件时用True需改一下码子
res = requests.get(self.url, headers=headers)
res.close() # 关闭请求 释放内存
except Exception as e:
print((start_, end_, "出错了,连接重试:%s", e,))
time.sleep(1)
continue
flag = True
# print("\n", ("%s-%s download success" % (start_, end_)), end="", flush=True)
# with lock:
with open(self.name, "rb+") as fd:
fd.seek(start_)
fd.write(res.content)
# self.fd.seek(start_) # 指定写文件的位置,下载的内容放到正确的位置处
# self.fd.write(res.content) # 将下载文件保存到 fd所打开的文件里
def run(self, url, name):
self.url = url
self.name = name
self.total = int(requests.head(url).headers['Content-Length'])
# file_size = int(urlopen(self.url).info().get('Content-Length', -1))
file_size = self.total
if os.path.exists(name):
first_byte = os.path.getsize(name)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
self.fd = open(name, "wb") # 续传时直接rb+ 文件不存在时会报错,先wb再rb+
self.fd.truncate(self.total) # 建一个和下载文件一样大的文件,不是必须的,stream=True时会用到
self.fd.close()
# self.fd = open(self.name, "rb+") # 续传时ab方式打开时会强制指针指向文件末尾,seek并不管用,应用rb+模式
thread_list = []
ts_queue = Queue() # 用队列的线程安全特性,以列表的形式把开始和结束加到队列
for ran in self.get_range():
start_, end_ = ran
ts_queue.put((start_, end_))
for i in range(self.num):
t = threading.Thread(target=self.download, name='th-' + str(i), kwargs={'ts_queue': ts_queue})
            t.daemon = True  # daemon threads exit together with the main program
thread_list.append(t)
for t in thread_list:
t.start()
for t in thread_list:
t.join() # 设置等待,全部线程完事后再继续
self.fd.close()
@retry(tries=3, delay=3) # 无限重试装饰性函数
def dowload_url(url):
"""
:param url:要下载的url
:return: request.get实例化对象
"""
import requests
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/87.0.4280.141',
}
response_obj = requests.get(url, headers=header, timeout=5) # get方式请求
response_obj.raise_for_status() # 检测异常方法。如有异常则抛出,触发retry
# print(f'{url} 下载完成')
return response_obj
def list_localTDX_cwfile(ext_name):
"""
列出本地已有的专业财务文件。返回文件列表
:param ext_name: str类型。文件扩展名。返回指定扩展名的文件列表
:return: list类型。财务专业文件列表
"""
cw_path = ucfg.tdx['tdx_path'] + os.sep + "vipdoc" + os.sep + "cw"
tmplist = os.listdir(cw_path) # 遍历通达信vipdoc/cw目录
cw_filelist = []
for file in tmplist: # 只保留gpcw????????.扩展名 格式文件
if len(file) == 16 and file[:4] == "gpcw" and file[-4:] == "." + ext_name:
cw_filelist.append(file)
# print(f'检测到{len(cw_filelist)}个专业财务文件')
return cw_filelist
def readall_local_cwfile():
"""
将全部财报文件读到df_cw字典里。会占用1G内存,但处理速度比遍历CSV方式快很多
:return: 字典形式,所有财报内容。
"""
print(f'开始载入所有财报文件到内存')
dict = {}
cwfile_list = os.listdir(ucfg.tdx['csv_cw']) # cw目录 生成文件名列表
starttime_tick = time.time()
for cwfile in cwfile_list:
if os.path.getsize(ucfg.tdx['csv_cw'] + os.sep + cwfile) != 0:
dict[cwfile[4:-4]] = pd.read_pickle(ucfg.tdx['csv_cw'] + os.sep + cwfile, compression=None)
    print(f'Finished reading all financial report files in {(time.time() - starttime_tick):.2f}s')
return dict
def make_fq(code, df_code, df_gbbq, df_cw='', start_date='', end_date='', fqtype='qfq'):
"""
股票周期数据复权处理函数
:param code:str格式,具体股票代码
:param df_code:DF格式,未除权的具体股票日线数据。DF自动生成的数字索引,列定义:date,open,high,low,close,vol,amount
:param df_gbbq:DF格式,通达信导出的全股票全日期股本变迁数据。DF读取gbbq文件必须加入dtype={'code': str}参数,否则股票代码开头0会忽略
:param df_cw:DF格式,读入内存的全部财务文件
:param start_date:可选,要截取的起始日期。默认为空。格式"2020-10-10"
:param end_date:可选,要截取的截止日期。默认为空。格式"2020-10-10"
:param fqtype:可选,复权类型。默认前复权。
:return:复权后的DF格式股票日线数据
"""
    '''The forward-adjustment reference code below was adapted from https://github.com/rainx/pytdx/issues/78#issuecomment-335668322
import datetime
import numpy as np
import pandas as pd
from pytdx.hq import TdxHq_API
# from pypinyin import lazy_pinyin
import tushare as ts
'除权除息'
api = TdxHq_API()
with api.connect('172.16.17.32', 7709):
# 从服务器获取该股的股本变迁数据
category = {
'1': '除权除息', '2': '送配股上市', '3': '非流通股上市', '4': '未知股本变动', '5': '股本变化',
'6': '增发新股', '7': '股份回购', '8': '增发新股上市', '9': '转配股上市', '10': '可转债上市',
'11': '扩缩股', '12': '非流通股缩股', '13': '送认购权证', '14': '送认沽权证'}
data = api.to_df(api.get_xdxr_info(0, '000001'))
data = data \
.assign(date=pd.to_datetime(data[['year', 'month', 'day']])) \
.drop(['year', 'month', 'day'], axis=1) \
.assign(category_meaning=data['category'].apply(lambda x: category[str(x)])) \
.assign(code=str('000001')) \
.rename(index=str, columns={'panhouliutong': 'liquidity_after',
'panqianliutong': 'liquidity_before', 'houzongguben': 'shares_after',
'qianzongguben': 'shares_before'}) \
.set_index('date', drop=False, inplace=False)
xdxr_data = data.assign(date=data['date'].apply(lambda x: str(x)[0:10])) # 该股的股本变迁DF处理完成
df_gbbq = xdxr_data[xdxr_data['category'] == 1] # 提取只有除权除息的行保存到DF df_gbbq
# print(df_gbbq)
# 从服务器读取该股的全部历史不复权K线数据,保存到data表, 只包括 日期、开高低收、成交量、成交金额数据
data = pd.concat([api.to_df(api.get_security_bars(9, 0, '000001', (9 - i) * 800, 800)) for i in range(10)], axis=0)
# 从data表加工数据,保存到bfq_data表
df_code = data \
.assign(date=pd.to_datetime(data['datetime'].apply(lambda x: x[0:10]))) \
.assign(code=str('000001')) \
.set_index('date', drop=False, inplace=False) \
.drop(['year', 'month', 'day', 'hour',
'minute', 'datetime'], axis=1)
df_code['if_trade'] = True
# 不复权K线数据处理完成,保存到bfq_data表
# 提取info表的category列的值,按日期一一对应,列拼接到bfq_data表。也就是标识出当日是除权除息日的行
data = pd.concat([df_code, df_gbbq[['category']][df_code.index[0]:]], axis=1)
# print(data)
data['date'] = data.index
data['if_trade'].fillna(value=False, inplace=True) # if_trade列,无效的值填充为False
data = data.fillna(method='ffill') # 向下填充无效值
# 提取info表的'fenhong', 'peigu', 'peigujia',‘songzhuangu'列的值,按日期一一对应,列拼接到data表。
# 也就是将当日是除权除息日的行,对应的除权除息数据,写入对应的data表的行。
data = pd.concat([data, df_gbbq[['fenhong', 'peigu', 'peigujia',
'songzhuangu']][df_code.index[0]:]], axis=1)
data = data.fillna(0) # 无效值填空0
data['preclose'] = (data['close'].shift(1) * 10 - data['fenhong'] + data['peigu']
* data['peigujia']) / (10 + data['peigu'] + data['songzhuangu'])
data['adj'] = (data['preclose'].shift(-1) / data['close']).fillna(1)[::-1].cumprod() # 计算每日复权因子
data['open'] = data['open'] * data['adj']
data['high'] = data['high'] * data['adj']
data['low'] = data['low'] * data['adj']
data['close'] = data['close'] * data['adj']
data['preclose'] = data['preclose'] * data['adj']
data = data[data['if_trade']]
result = data \
.drop(['fenhong', 'peigu', 'peigujia', 'songzhuangu', 'if_trade', 'category'], axis=1)[data['open'] != 0] \
.assign(date=data['date'].apply(lambda x: str(x)[0:10]))
print(result)
'''
# 先进行判断。如果有adj列,且没有NaN值,表示此股票数据已处理完成,无需处理。直接返回。
# 如果没有‘adj'列,表示没进行过复权处理,当作新股处理
if 'adj' in df_code.columns.to_list():
if True in df_code['adj'].isna().to_list():
first_index = np.where(df_code.isna())[0][0] # 有NaN值,设为第一个NaN值所在的行
else:
return ""
else:
first_index = 0
flag_newstock = True
flag_attach = False # True=追加数据模式 False=数据全部重新计算
# 设置新股标志。True=新股,False=旧股。新股跳过追加数据部分的代码。如果没定义,默认为False
if 'flag_newstock' not in dir():
flag_newstock = False
# 提取该股除权除息行保存到DF df_cqcx,提取其他信息行到df_gbbq
df_cqcx = df_gbbq.loc[(df_gbbq['code'] == code) & (df_gbbq['类别'] == '除权除息')]
df_gbbq = df_gbbq.loc[(df_gbbq['code'] == code) & (
(df_gbbq['类别'] == '股本变化') |
(df_gbbq['类别'] == '送配股上市') |
(df_gbbq['类别'] == '转配股上市'))]
# 清洗df_gbbq,可能出现同一日期有 配股上市、股本变化两行数据。不清洗后面合并会索引冲突。
# 下面的代码可以保证删除多个不连续的重复行,用DF dropdup方法不能确保删除的值是大是小
# 如果Ture在列表里。表示有重复行存在
if True in df_gbbq.duplicated(subset=['权息日'], keep=False).to_list():
# 提取重复行的索引
del_index = [] # 要删除的后流通股的值
tmp_dict = df_gbbq.duplicated(subset=['权息日'], keep=False).to_dict()
for k, v in tmp_dict.items():
if v:
del_index.append(df_gbbq.at[k, '送转股-后流通盘'])
# 如果dup_index有1个以上的值,且K+1的元素是False,或K+1不存在也返回False,表示下一个元素 不是 重复行
if len(del_index) > 1 and (tmp_dict.get(k + 1, False) == False):
del_index.remove(max(del_index)) # 删除最大值
# 选择剩余的值,取反,则相当于保留了最大值,删除了其余的值
df_gbbq = df_gbbq[~df_gbbq['送转股-后流通盘'].isin(del_index)]
# int64类型储存的日期19910404,转换为dtype: datetime64[ns] 1991-04-04 为了按日期一一对应拼接
df_cqcx = df_cqcx.assign(date=pd.to_datetime(df_cqcx['权息日'], format='%Y%m%d')) # 添加date列,设置为datetime64[ns]格式
df_cqcx.set_index('date', drop=True, inplace=True) # 设置权息日为索引 (字符串表示的日期 "19910101")
df_cqcx['category'] = 1.0 # 添加category列
df_gbbq = df_gbbq.assign(date=pd.to_datetime(df_gbbq['权息日'], format='%Y%m%d')) # 添加date列,设置为datetime64[ns]格式
df_gbbq.set_index('date', drop=True, inplace=True) # 设置权息日为索引 (字符串表示的日期 "19910101")
if len(df_cqcx) > 0: # =0表示股本变迁中没有该股的除权除息信息。gbbq_lastest_date设置为今天,当作新股处理
cqcx_lastest_date = df_cqcx.index[-1].strftime('%Y-%m-%d') # 提取最新的除权除息日
else:
cqcx_lastest_date = str(datetime.date.today())
flag_newstock = True
# 判断df_code是否已有历史数据,是追加数据还是重新生成。
# 如果gbbq_lastest_date not in df_code.loc[first_index:, 'date'].to_list(),表示未更新数据中不包括除权除息日
# 由于前复权的特性,除权后历史数据都要变。因此未更新数据中不包括除权除息日,只需要计算未更新数据。否则日线数据需要全部重新计算
# 如果'adj'在df_code的列名单里,表示df_code是已复权过的,只需追加新数据,否则日线数据还是需要全部重新计算
if cqcx_lastest_date not in df_code.loc[first_index:, 'date'].to_list() and not flag_newstock:
if 'adj' in df_code.columns.to_list():
flag_attach = True # 确定为追加模式
df_code_original = df_code # 原始code备份为df_code_original,最后合并
df_code = df_code.iloc[first_index:] # 切片df_code,只保留需要处理的行
df_code.reset_index(drop=True, inplace=True)
df_code_original.dropna(how='any', inplace=True) # 丢掉缺失数据的行,之后直接append新数据就行。比merge简单。
df_code_original['date'] = pd.to_datetime(df_code_original['date'], format='%Y-%m-%d') # 转为时间格式
df_code_original.set_index('date', drop=True, inplace=True) # 时间为索引。方便与另外复权的DF表对齐合并
# 单独提取流通股处理。因为流通股是设置流通股变更时间节点,最后才填充nan值。和其他列的处理会冲突。
# 如果有流通股列,单独复制出来;如果没有流通股列,添加流通股列,赋值为NaN。
# 如果是追加数据模式,则肯定已存在流通股列且数据已处理。因此不需单独提取流通股列。只在前复权前处理缺失的流通股数据即可
# 虽然财报中可能没有流通股的数据,但股本变迁文件中最少也有股票第一天上市时的流通股数据。
# 且后面还会因为送配股上市、股本变化,导致在非财报日之前,流通股就发生变动
if not flag_attach:
if '流通股' in df_code.columns.to_list():
df_ltg = pd.DataFrame(index=df_code.index)
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = df_code['流通股']
del df_code['流通股']
else:
df_ltg = pd.DataFrame(index=df_code.index)
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = np.nan
else:
# 附加模式,此处df_code是已经切片过的,只包括需要更新的数据行。其中也包含流通股列,值全为NaN。
# 类似单独提出处理流通股列,和新股模式的区别是只处理需要更新的数据行。
df_ltg = pd.DataFrame(index=df_code.index)
del df_code['流通股']
# 第一个值赋值为df_code_original流通股列第一个NaN值的前一个有效值
ltg_lastest_value = df_code_original.at[df_code_original.index[-1], '流通股']
df_ltg['date'] = df_code['date']
df_ltg['流通股'] = np.nan
df_ltg.at[0, '流通股'] = ltg_lastest_value
df_gbbq = df_gbbq.rename(columns={'送转股-后流通盘': '流通股'}) # 列改名,为了update可以匹配
# 用df_gbbq update data,由于只有流通股列重复,因此只会更新流通股列对应索引的NaN值
df_ltg['date'] = pd.to_datetime(df_ltg['date'], format='%Y-%m-%d') # 转为时间格式
df_ltg.set_index('date', drop=True, inplace=True) # 时间为索引。方便与另外复权的DF表对齐合并
df_ltg.update(df_gbbq, overwrite=False) # 使用update方法更新df_ltg
if not flag_attach: # 附加模式则单位已经调整过,无需再调整
# 股本变迁里的流通股单位是万股。转换与财报的单位:股 统一
df_ltg['流通股'] = df_ltg['流通股'] * 10000
# int64类型储存的日期19910404,转换为dtype: datetime64[ns] 1991-04-04 为了按日期一一对应拼接
with pd.option_context('mode.chained_assignment', None): # 临时屏蔽语句警告
df_code['date'] = pd.to_datetime(df_code['date'], format='%Y-%m-%d')
df_code.set_index('date', drop=True, inplace=True)
df_code.insert(df_code.shape[1], 'if_trade', True) # 插入if_trade列,赋值True
# 提取df_cqcx和df_gbbq表的category列的值,按日期一一对应,列拼接到bfq_data表。也就是标识出当日是股本变迁的行
data = pd.concat([df_code, df_cqcx[['category']][df_code.index[0]:]], axis=1)
# print(data)
data['if_trade'].fillna(value=False, inplace=True) # if_trade列,无效的值填充为False
data.fillna(method='ffill', inplace=True) # 向下填充无效值
# 提取info表的'fenhong', 'peigu', 'peigujia',‘songzhuangu'列的值,按日期一一对应,列拼接到data表。
# 也就是将当日是除权除息日的行,对应的除权除息数据,写入对应的data表的行。
data = pd.concat([data, df_cqcx[['分红-前流通盘', '配股-后总股本', '配股价-前总股本',
'送转股-后流通盘']][df_code.index[0]:]], axis=1)
data = data.fillna(0) # 无效值填空0
data['preclose'] = (data['close'].shift(1) * 10 - data['分红-前流通盘'] + data['配股-后总股本']
* data['配股价-前总股本']) / (10 + data['配股-后总股本'] + data['送转股-后流通盘'])
# 计算每日复权因子 前复权最近一次股本变迁的复权因子为1
data['adj'] = (data['preclose'].shift(-1) / data['close']).fillna(1)[::-1].cumprod()
data['open'] = data['open'] * data['adj']
data['high'] = data['high'] * data['adj']
data['low'] = data['low'] * data['adj']
data['close'] = data['close'] * data['adj']
# data['preclose'] = data['preclose'] * data['adj'] # 这行没用了
data = data[data['if_trade']] # 重建整个表,只保存if_trade列=true的行
# 抛弃过程处理行,且open值不等于0的行
data = data.drop(['分红-前流通盘', '配股-后总股本', '配股价-前总股本',
'送转股-后流通盘', 'if_trade', 'category', 'preclose'], axis=1)[data['open'] != 0]
# 复权处理完成
# 如果没有传参进来,就自己读取财务文件,否则用传参的值
if df_cw == '':
cw_dict = readall_local_cwfile()
else:
cw_dict = df_cw
# 计算换手率
# 财报数据公开后,股本才变更。因此有效时间是“当前财报日至未来日期”。故将结束日期设置为2099年。每次财报更新后更新对应的日期时间段
e_date = '20990101'
for cw_date in cw_dict: # 遍历财报字典 cw_date=财报日期 cw_dict[cw_date]=具体的财报内容
# 如果复权数据表的首行日期>当前要读取的财务报表日期,则表示此财务报表发布时股票还未上市,跳过此次循环。有例外情况:003001
# (cw_dict[cw_date][1] == code).any() 表示当前股票code在财务DF里有数据
if df_ltg.index[0].strftime('%Y%m%d') <= cw_date <= df_ltg.index[-1].strftime('%Y%m%d') \
and len(cw_dict[cw_date]) > 0:
if (cw_dict[cw_date][1] == code).any():
# 获取目前股票所在行的索引值,具有唯一性,所以直接[0]
code_df_index = cw_dict[cw_date][cw_dict[cw_date][1] == code].index.to_list()[0]
# DF格式读取的财报,字段与财务说明文件的序号一一对应,如果是CSV读取的,字段需+1
# print(f'{cwfile_date} 总股本:{cw_dict[cw_date].iat[code_df_index,238]}'
# f'流通股本:{cw_dict[cw_date].iat[code_df_index,239]}')
# 如果流通股值是0,则进行下一次循环
if int(cw_dict[cw_date].iat[code_df_index, 239]) != 0:
# df_ltg[cw_date:e_date].index[0] 表示df_ltg中从cw_date到e_date的第一个索引的值。
# 也就是离cw_date日期最近的下一个有效行
df_ltg.at[df_ltg[cw_date:e_date].index[0], '流通股'] = float(cw_dict[cw_date].iat[code_df_index, 239])
# df_ltg拼接回原DF
data = pd.concat([data, df_ltg], axis=1)
data = data.fillna(method='ffill') # 向下填充无效值
data = data.fillna(method='bfill') # 向上填充无效值 为了弥补开始几行的空值
data = data.round({'open': 2, 'high': 2, 'low': 2, 'close': 2, }) # 指定列四舍五入
if '流通股' in data.columns.to_list():
data['流通市值'] = data['流通股'] * data['close']
data['换手率'] = data['vol'] / data['流通股'] * 100
data = data.round({'流通市值': 2, '换手率': 2, }) # 指定列四舍五入
if flag_attach: # 追加模式,则附加最新处理的数据
data = df_code_original.append(data)
if len(start_date) == 0 and len(end_date) == 0:
pass
elif len(start_date) != 0 and len(end_date) == 0:
data = data[start_date:]
elif len(start_date) == 0 and len(end_date) != 0:
data = data[:end_date]
elif len(start_date) != 0 and len(end_date) != 0:
data = data[start_date:end_date]
data.reset_index(drop=False, inplace=True) # 重置索引行,数字索引,date列到第1列,保存为str '1991-01-01' 格式
# 最后调整列顺序
# data = data.reindex(columns=['code', 'date', 'open', 'high', 'low', 'close', 'vol', 'amount', 'adj', '流通股', '流通市值', '换手率'])
return data
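# Minimal usage sketch (hypothetical file names): forward-adjust one stock's daily bars.
# df_gbbq = pd.read_csv('gbbq.csv', encoding='gbk', dtype={'code': str})
# df_day = pd.read_csv(ucfg.tdx['csv_lday'] + os.sep + '600000.csv')
# df_qfq = make_fq('600000', df_day, df_gbbq, df_cw=readall_local_cwfile())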
def get_tdx_lastestquote(stocklist=None):
"""
使用pytdx获取当前实时行情。返回行情的DF列表格式。stocklist为空则获取ucfg.tdx['csv_lday']目录全部股票行情
:param stocklist: 可选,list类型。str类型传入股票列表['000001', '000002','600030']
:return:当前从pytdx服务器获取的最新股票行情
"""
    # get_security_quotes accepts at most 80 stocks per call; larger batches miss more stocks. Test data:
    # batch size   stocks returned   seconds
    # 80           3554              2.59
    # 40           3874              5.07
    # 20           4015              10.12
    # 10           4105              17.54
from pytdx.hq import TdxHq_API
stocklist_pytdx = []
if stocklist is None: # 如果列表为空,则获取csv_lday目录全部股票
stocklist = []
for i in os.listdir(ucfg.tdx['csv_lday']):
stocklist.append(i[:-4])
elif isinstance(stocklist, str):
tmp = []
tmp.append(stocklist)
stocklist = tmp
del tmp
elif isinstance(stocklist, tuple):
stocklist_pytdx.append(stocklist)
if isinstance(stocklist, list):
for stock in stocklist: # 构造get_security_quotes所需的元组参数
if stock[:1] == '6':
stocklist_pytdx.append(tuple([1, stock]))
elif stock[:1] == '0' or stock[:1] == '3':
stocklist_pytdx.append(tuple([0, stock]))
del stocklist
df = pd.DataFrame()
api = TdxHq_API(raise_exception=False)
starttime_tick = time.time()
    print(f'Requesting real-time quotes for {len(stocklist_pytdx)} stocks')
    if api.connect(ucfg.tdx['pytdx_ip'], ucfg.tdx['pytdx_port']):
        # First round of quote requests; roughly 100 stocks are missed here due to a pytdx quirk
if len(stocklist_pytdx) == 1:
data = api.to_df(api.get_security_quotes(stocklist_pytdx))
            df = pd.concat([df, data], axis=0, ignore_index=True)
import datetime
import numpy as np
import pandas as pd
import pytest
from .utils import (
get_extension,
to_json_string,
to_days_since_epoch,
extend_dict,
filter_by_columns,
breakdown_by_month,
breakdown_by_month_sum_days,
to_bin,
)
@pytest.fixture
def issues():
return pd.DataFrame(
[
{
"key": "ABC-1",
"priority": "high",
"start": pd.Timestamp(2018, 1, 1),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-2",
"priority": "med",
"start": pd.Timestamp(2018, 1, 2),
"end": pd.Timestamp(2018, 1, 20),
},
{
"key": "ABC-3",
"priority": "high",
"start": pd.Timestamp(2018, 2, 3),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-4",
"priority": "med",
"start": pd.Timestamp(2018, 1, 4),
"end": pd.Timestamp(2018, 3, 20),
},
{
"key": "ABC-5",
"priority": "high",
"start": pd.Timestamp(2018, 2, 5),
"end": pd.Timestamp(2018, 2, 20),
},
{
"key": "ABC-6",
"priority": "med",
"start": pd.Timestamp(2018, 3, 6),
"end": pd.Timestamp(2018, 3, 20),
},
],
columns=["key", "priority", "start", "end"],
)
def test_extend_dict():
assert extend_dict({"one": 1}, {"two": 2}) == {"one": 1, "two": 2}
def test_get_extension():
assert get_extension("foo.csv") == ".csv"
assert get_extension("/path/to/foo.csv") == ".csv"
assert get_extension("\\path\\to\\foo.csv") == ".csv"
assert get_extension("foo") == ""
assert get_extension("foo.CSV") == ".csv"
def test_to_json_string():
assert to_json_string(1) == "1"
assert to_json_string("foo") == "foo"
assert to_json_string(None) == ""
assert to_json_string(np.NaN) == ""
assert to_json_string(pd.NaT) == ""
assert to_json_string(pd.Timestamp(2018, 2, 1)) == "2018-02-01"
def test_to_days_since_epoch():
assert to_days_since_epoch(datetime.date(1970, 1, 1)) == 0
assert to_days_since_epoch(datetime.date(1970, 1, 15)) == 14
def test_filter_by_columns():
df = pd.DataFrame(
[
{"high": 1, "med": 2, "low": 0},
{"high": 3, "med": 1, "low": 2},
{"high": 2, "med": 2, "low": 3},
],
columns=["high", "med", "low"],
)
# Check without values, original data frame will be returned.
result = filter_by_columns(df, None)
assert result.equals(df)
# Check with values, columns will be filtered and reordered
result = filter_by_columns(df, ["med", "high"])
assert list(result.columns) == ["med", "high"]
assert result.to_dict("records") == [
{"high": 1, "med": 2},
{"high": 3, "med": 1},
{"high": 2, "med": 2},
]
def test_breakdown_by_month(issues):
breakdown = breakdown_by_month(issues, "start", "end", "key", "priority")
assert list(breakdown.columns) == ["high", "med"] # alphabetical
assert list(breakdown.index) == [
pd.Timestamp(2018, 1, 1),
pd.Timestamp(2018, 2, 1),
pd.Timestamp(2018, 3, 1),
]
assert breakdown.to_dict("records") == [
{"high": 1, "med": 2},
{"high": 3, "med": 1},
{"high": 2, "med": 2},
]
def test_breakdown_by_month_open_ended(issues):
# Replace ABC-6 end date to None
issues["end"][5] = None
breakdown = breakdown_by_month(issues, "start", "end", "key", "priority")
assert list(breakdown.columns) == ["high", "med"] # alphabetical
# Note: We will get columns until the current month; assume this test is
# run from June onwards ;)
assert list(breakdown.index)[:5] == [
pd.Timestamp(2018, 1, 1),
pd.Timestamp(2018, 2, 1),
pd.Timestamp(2018, 3, 1),
pd.Timestamp(2018, 4, 1),
pd.Timestamp(2018, 5, 1),
]
assert breakdown.to_dict("records")[:5] == [
{"high": 1, "med": 2},
{"high": 3, "med": 1},
{"high": 2, "med": 2},
{"high": 0, "med": 1},
{"high": 0, "med": 1},
]
def test_breakdown_by_month_none_values(issues):
# Replace all priorities to None
issues["priority"] = None
breakdown = breakdown_by_month(issues, "start", "end", "key", "priority")
assert list(breakdown.columns) == [None]
assert list(breakdown.index) == [
pd.Timestamp(2018, 1, 1),
pd.Timestamp(2018, 2, 1),
pd.Timestamp(2018, 3, 1),
]
assert breakdown.to_dict("records") == [{None: 3}, {None: 4}, {None: 4}]
def test_breakdown_by_month_sum_days(issues):
breakdown = breakdown_by_month_sum_days(issues, "start", "end", "priority")
assert list(breakdown.columns) == ["high", "med"] # alphabetical
assert list(breakdown.index) == [
pd.Timestamp(2018, 1, 1),
pd.Timestamp(2018, 2, 1),
pd.Timestamp(2018, 3, 1),
]
assert breakdown.to_dict("records") == [
{"high": 31.0, "med": 47.0},
{"high": 70.0, "med": 28.0},
{"high": 40.0, "med": 35.0},
]
def test_breakdown_by_month_sum_day_open_ended(issues):
# Replace ABC-6 end date to None
issues["end"][5] = None
breakdown = breakdown_by_month_sum_days(issues, "start", "end", "priority")
assert list(breakdown.columns) == ["high", "med"] # alphabetical
# Note: We will get columns until the current month; assume this test is
# run from June onwards ;)
assert list(breakdown.index)[:5] == [
        pd.Timestamp(2018, 1, 1),
import numpy as np
import pandas as pd
import joblib, os
class dataset_creator():
def __init__(self, project, data, njobs=1):
self.data = data
self.dates_ts = self.check_dates(data.index)
self.project_name= project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.areas = self.static_data['areas']
self.nwp_model = self.static_data['NWP_model']
self.njobs = njobs
if self.static_data['type'] == 'pv':
self.variables = ['Cloud', 'Flux', 'Temperature']
elif self.static_data['type'] == 'wind':
self.variables = ['WS', 'WD']
else:
self.variables = []
def check_dates(self, dates):
start_date = pd.to_datetime(dates[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(dates[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
return dates
def stack_2d(self, X, sample):
if len(sample.shape)==3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape)==2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
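    # Worked example (illustrative): feeding three samples of shape (8, 8, 3) through
    # stack_2d one at a time produces arrays of shape (8, 8, 3), then (2, 8, 8, 3),
    # then (3, 8, 8, 3).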
def get_3d_dataset(self):
X = np.array([])
data_var = dict()
for var in self.variables:
if var in {'WS', 'Flux'}:
data_var[var+'_prev'] = X
data_var[var] = X
data_var[var+'_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
for t in self.dates_ts:
nwps = joblib.load(
os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle'))
pdates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H').strftime(
'%d%m%y%H%M')
for date in pdates:
try:
nwp = nwps[date]
date = pd.to_datetime(date, format='%d%m%y%H%M')
nwp_prev = nwps[(date - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
                    nwp_next = nwps[(date + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
import argparse
import yaml
import os
import shutil
from pathlib import Path
from collections import OrderedDict
import torch
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams["lines.linewidth"] = 0.8
from pysnn.neuron import BaseNeuron
from pysnn.network import SNNNetwork
from evolutionary.utils.constructors import build_network, build_environment
from evolutionary.utils.utils import randomize_env
def plot_performance(folder, parameters):
folder = Path(folder)
individual_id = "_".join(
[s.replace(".net", "") for s in parameters.split("/")[-2:]]
)
save_folder = folder / ("test+" + individual_id)
if os.path.exists(save_folder):
shutil.rmtree(save_folder)
os.makedirs(save_folder)
# Load config
with open(folder / "config.yaml", "r") as cf:
config = yaml.full_load(cf)
# Build environment
env = build_environment(config)
# Load network
network = build_network(config)
network.load_state_dict(torch.load(parameters))
# Create plot for performance
fig_p, axs_p = plt.subplots(6, 1, sharex=True, figsize=(10, 10))
axs_p[0].set_ylabel("height [m]")
axs_p[1].set_ylabel("velocity [m/s]")
axs_p[2].set_ylabel("thrust setpoint [g]")
axs_p[3].set_ylabel("divergence [1/s]")
axs_p[4].set_ylabel("divergence dot [1/s2]")
axs_p[5].set_ylabel("spikes [?]")
axs_p[5].set_xlabel("time [s]")
# Create plot for neurons
fig_n, axs_n = plt.subplots(7, 3, sharex=True, figsize=(10, 10))
axs_n = axs_n.flatten()
# Create list to hold spike rates per neuron
rates = []
# 5 runs
for i in range(5):
# With different properties
# Randomizing here means that another run of this file will get different envs,
# but so be it. Not easy to change
env = randomize_env(env, config)
# Reset network and env
if isinstance(network, SNNNetwork):
network.reset_state()
obs = env.reset(h0=config["env"]["h0"][1])
done = False
spikes = 0
# For plotting
action_list = []
state_list = []
obs_gt_list = []
obs_list = []
time_list = []
spike_list = []
# For neuron visualization
neuron_dict = OrderedDict(
[
(name, {"trace": [], "spike": []})
for name, child in network.named_children()
if isinstance(child, BaseNeuron)
]
)
# Log performance
action_list.append(np.clip(env.action, *config["env"]["g bounds"]))
state_list.append(env.state.copy())
obs_gt_list.append(env.div_ph.copy())
obs_list.append(obs.copy())
time_list.append(env.t)
spike_list.append([0, 0])
# Log neurons
for name, child in network.named_children():
if name in neuron_dict:
neuron_dict[name]["trace"].append(
child.trace.detach().clone().view(-1).numpy()
)
neuron_dict[name]["spike"].append(
child.spikes.detach().clone().view(-1).numpy()
) if hasattr(child, "spikes") else None
while not done:
# Step the environment
obs = torch.from_numpy(obs)
action = network.forward(obs.view(1, 1, -1))
action = action.numpy()
obs, _, done, _ = env.step(action)
# Log performance
action_list.append(np.clip(env.action[0], *config["env"]["g bounds"]))
state_list.append(env.state.copy())
obs_gt_list.append(env.div_ph.copy())
obs_list.append(obs.copy())
time_list.append(env.t)
if isinstance(network, SNNNetwork):
spikes += (
network.neuron1.spikes.sum().item()
+ network.neuron2.spikes.sum().item()
if network.neuron1 is not None
else network.neuron2.spikes.sum().item()
)
spike_list.append(
[
spikes,
network.neuron1.spikes.sum().item()
+ network.neuron2.spikes.sum().item()
if network.neuron1 is not None
else network.neuron2.spikes.sum().item(),
]
)
# Log neurons
for name, child in network.named_children():
if name in neuron_dict:
neuron_dict[name]["trace"].append(
child.trace.detach().clone().view(-1).numpy()
)
neuron_dict[name]["spike"].append(
child.spikes.detach().clone().view(-1).numpy()
) if hasattr(child, "spikes") else None
# Plot data
# Height
axs_p[0].plot(time_list, np.array(state_list)[:, 0], "C0", label=f"run {i}")
# Velocity
axs_p[1].plot(time_list, np.array(state_list)[:, 1], "C0", label=f"run {i}")
# Thrust
axs_p[2].plot(time_list, np.array(action_list), "C0", label=f"run {i}")
# Divergence
axs_p[3].plot(time_list, np.array(obs_list)[:, 0], "C0", label=f"run {i}")
axs_p[3].plot(time_list, np.array(obs_gt_list)[:, 0], "C1", label=f"run {i} GT")
# Divergence dot
axs_p[4].plot(time_list, np.array(obs_list)[:, 1], "C0", label=f"run {i}")
axs_p[4].plot(time_list, np.array(obs_gt_list)[:, 1], "C1", label=f"run {i} GT")
# Spikes
axs_p[5].plot(
time_list,
np.array(spike_list)[:, 0] / np.array(time_list),
"C0",
label=f"run {i}",
)
axs_p[5].plot(
time_list,
pd.Series(np.array(spike_list)[:, 1])
.rolling(window=20, min_periods=1)
.mean()
.values,
"C1",
label=f"run {i}",
)
# Plot neurons
neurons = OrderedDict()
k = 0
# Go over layers
for recordings in neuron_dict.values():
# Go over neurons in layer
for j in range(np.array(recordings["spike"]).shape[1]):
neurons[f"n{k}_spike"] = np.array(recordings["spike"])[:, j].astype(
float
)
neurons[f"n{k}_trace"] = np.array(recordings["trace"])[:, j]
neurons[f"n{k}_ma"] = (
pd.Series(np.array(recordings["spike"])[:, j])
.rolling(window=20, min_periods=1)
.mean()
.values
)
axs_n[k].plot(time_list, np.array(recordings["trace"])[:, j], "C0")
axs_n[k].plot(time_list, np.array(recordings["spike"])[:, j], "C1")
axs_n[k].plot(
time_list,
pd.Series(np.array(recordings["spike"])[:, j])
.rolling(window=20, min_periods=1)
.mean()
.values,
"C2",
)
axs_n[k].set_title(f"{k}")
k += 1
# Save run
rates.append(
[
[
v.sum() / (time_list[-1] - config["env"]["settle"]),
v.sum() / (len(time_list) - config["env"]["settle"] // env.dt + 1),
]
for k, v in neurons.items()
if "spike" in k
]
)
data = pd.DataFrame(
{
"time": time_list,
"pos_z": np.array(state_list)[:, 0],
"vel_z": np.array(state_list)[:, 1],
"thrust": np.array(state_list)[:, 2],
"tsp": np.array(action_list),
"tsp_lp": pd.Series(action_list)
.rolling(window=20, min_periods=1)
.mean()
.values,
"div": np.array(obs_list)[:, 0],
"div_gt": np.array(obs_gt_list)[:, 0],
"divdot": np.array(obs_list)[:, 1],
"divdot_gt": np.array(obs_gt_list)[:, 1],
"spike_count": np.array(spike_list)[:, 0],
"spike_step": np.array(spike_list)[:, 1],
}
)
neurons = pd.DataFrame(neurons)
        data = pd.concat([data, neurons], axis=1)
data.to_csv(str(save_folder) + f"/run{i}.csv", index=False, sep=",")
# Compute rates
rates = pd.DataFrame(
{
"mean_time": np.array(rates).mean(0)[:, 0],
"mean_steps": np.array(rates).mean(0)[:, 1],
"std_time": np.array(rates).std(0)[:, 0],
"std_steps": np.array(rates).std(0)[:, 1],
}
)
rates.to_csv(str(save_folder) + f"/rates.csv", index=False, sep=",")
# Write network to tikz-network-compatible file
# Edges
if network.neuron1 is not None:
# First layer
        edges_0 = pd.DataFrame(columns=["u", "v", "lw_raw", "color", "lw"])
import pandas as pd
import re
import numpy as np
def read_single_data(path):
"""
Read data in ndarray type
:param path: path of data file
:return: data: ndarray
"""
normal = pd.read_csv(path, header=None)
normal = filter_data(normal)
return normal.values
def read_origin_data(path):
"""
Read data in data frame type
:param path: path of data file
:return: data: ndarray
"""
normal = pd.read_csv(path, header=None)
# normal = filter_data(normal)
return normal
def filter_data(data_frame):
"""
Process string value
:param data_frame: raw data in data frame type
:return: data frame
"""
data_frame[data_frame.columns[len(data_frame.columns) - 2]] = \
data_frame[data_frame.columns[len(data_frame.columns) - 2]].apply(lambda x: int("".join(re.findall(r'\d+', x))))
data_frame[data_frame.columns[len(data_frame.columns) - 1]] = \
data_frame[data_frame.columns[len(data_frame.columns) - 1]].apply(lambda x: int(x, 16))
return data_frame
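# Worked example (illustrative): a row whose last two fields are 'ID_27' and '1a'
# becomes 27 and 26 after filter_data - digits are extracted from the text field and
# the final field is parsed as hexadecimal with int(x, 16).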
def preprocess(path, sample_bound=-1, abnormal_bound=-1, normal_bound=-1, abnormal_rate = 0.5):
"""
Complete data preprocess.
:param path: data path
:param sample_bound: max sample num
:param abnormal_bound: max abnormal tag num
:param normal_bound: max normal tag num
:return: origin_data, sample_data, abnormal_index, normal_index, abnormal_data
"""
origin_data = read_origin_data(path)
normal_origin_data = origin_data[origin_data[0] == 0]
abnormal_origin_data = origin_data[origin_data[0] == 1]
if sample_bound > 0:
abnormal_sample_data = abnormal_origin_data.sample(n=int(sample_bound*abnormal_rate))
normal_sample_data = normal_origin_data.sample(n=int(sample_bound*(1-abnormal_rate)))
        origin_data = pd.concat([abnormal_sample_data, normal_sample_data])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
# Import python libraries
import sys
import os
import argparse
import pandas as pd
import math
import time
import datetime
try:
from pyrainbowterm import *
except ImportError:
print('Can not import pyrainbowterm!', log_type='error')
print('Try: pip install pyrainbowterm', log_type='hint')
sys.exit(1)
# Import file_operations
import file_operations
# Source code meta data
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# Numeric mapping of the entire data
def numeric_mapping(data_frame, mapping_dict):
"""
This function maps every string values into a numeric values in pandas data frame
:param data_frame: Python pandas data frame
:param mapping_dict: Python dictionary with str -> number(int/long) mapping
:return: Python pandas data frame
"""
# Map source and target row according to mapping dictionary
print('Mapping data frame.....', log_type='info')
data_frame['source'] = data_frame['source'].map(mapping_dict)
data_frame['target'] = data_frame['target'].map(mapping_dict)
# Return mapped data frame
return data_frame
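# Minimal usage sketch (illustrative edge list): map string node labels to integers.
# edges = pd.DataFrame({'source': ['a', 'b'], 'target': ['b', 'c']})
# mapping = {'a': 0, 'b': 1, 'c': 2}
# numeric_mapping(edges, mapping)  # source/target become [0, 1] and [1, 2]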
# Create output file
def create_output_file(numeric_data_frame, output_file_name):
"""
This function creates a file from python pandas data frame
:param numeric_data_frame: Python pandas data frame
:param output_file_name: Output file's full path with extension
:return: NULL
"""
# Create numeric output file
print('Creating numerically mapped output file.....', log_type='info')
try:
numeric_data_frame.to_csv(output_file_name, index=False, header=False, sep=' ')
print('Output file creation complete!', color='green', log_type='info')
except Exception as e:
print('Can not write output file. ERROR: {}'.format(e), log_type='error')
sys.exit(1)
# Extract unique nodes/values for mapping
def extract_nodes(data_frame):
"""
This function extracts unique values from a python pandas data frame given that first two columns have headers
'source' and 'target'
:param data_frame: Python pandas data frame
:return: Python dictionary with unique values mapped to an integer
"""
# Find unique values to create a look up table
# Returns a numpy array
print('Extracting unique values/nodes.....', log_type='info')
unique_values = pd.unique(data_frame[['source', 'target']].values.ravel('K'))
# Create a PANDAS data frame out of numpy array with unique nodes
print('Converting values into pandas data frame.....', log_type='info')
    lookup_data = pd.DataFrame(unique_values, columns=['label'])
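    # Hedged continuation sketch: the docstring promises a value -> integer mapping,
    # so turn the lookup table's index into that dictionary (the exact return shape of
    # the original function is assumed).
    mapping_dict = dict(zip(lookup_data['label'], lookup_data.index))
    return mapping_dict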
'''
For more information and details about the algorithm, please refer to
Pattern classification with Evolving Long-term Cognitive
Networks
<NAME> a,b,⇑, <NAME>˛bska c, <NAME> d
'''
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import datasets
from sklearn import model_selection
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from tensorflow.keras import backend as K
from tensorflow.python.keras.backend import set_session
from tensorflow.keras import regularizers
import matplotlib.pyplot as plt
from scipy.stats import entropy
from math import log, e
import warnings
warnings.filterwarnings("ignore")
class WeightRegularizer(tf.keras.regularizers.Regularizer):
'''
regularizing the wieghts
'''
def __init__(self, coef, mask):
self.coef = coef
self.mask = mask
def __call__(self, weight_matrix):
tensor = tf.convert_to_tensor(self.coef, np.float32)
reg1 = K.sum(K.abs(tf.math.multiply(tf.math.subtract(weight_matrix,tensor), self.mask)))
reg2 = 0.001 * K.sum(K.square(weight_matrix))
return reg1 + reg2
def get_config(self):
return {'coefficients': self.coef, 'penalization': self.mask}
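# Minimal usage sketch (illustrative shapes): pull a layer's weights towards a target
# matrix `coef` wherever `mask` is 1, as done inside run_model below.
# coef = np.zeros((4, 4), dtype=np.float32)
# mask = np.ones((4, 4), dtype=np.float32)
# layer = tf.keras.layers.Dense(4, kernel_regularizer=WeightRegularizer(coef, mask))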
def run_model(file, layers=5, folds=5, epochs=1000, verbose=True, regularize=True):
'''
:param file: expects optimize .arff values with features vals [0,1]
:param layers: numbers of nn layers (def 5)
:param folds: how many validation folds (def 5)
:param epochs: number of epochs (def 1000)
:param verbose:
:param regularize:
:return: accuracy and weights matrix
'''
X, y, out = read_arff(file)
hidden = len(X[0])
skf = StratifiedKFold(n_splits=folds)
skf.get_n_splits(X, y)
acc_arr = []
ent_arr = []
for train_index, test_index in skf.split(X, y):
X_train, X_test = np.asarray(X[train_index]).astype(np.float32),np.asarray(X[test_index]).astype(np.float32) #X[train_index], X[test_index]
y_train, y_test = np.asarray(y[train_index]).astype(np.float32),np.asarray(y[test_index]).astype(np.float32) #y[train_index], y[test_index]
# X = np.asarray(X).astype(np.float32)
if(regularize):
coef, mask = coefficients(matrix=X_train)
network = [tf.keras.layers.Flatten()]
for i in range(layers):
reg = None if (i == layers-1 or not regularize) else WeightRegularizer(coef, mask)
dense = tf.keras.layers.Dense(hidden, activation=tf.nn.tanh, kernel_regularizer=reg)
network.append(dense)
network.append(tf.keras.layers.Dense(out, activation=tf.nn.softmax))
model = tf.keras.models.Sequential(network)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=epochs, verbose=0)
if(verbose):
plot_loss_weights(history, model, mask)
weights = model.get_weights()
relajo = error(weights, coef, mask)
ent_arr.append(relajo)
loss, acc = model.evaluate(X_test, y_test, verbose=0)
acc_arr.append(acc)
return np.mean(acc_arr), np.mean(ent_arr), weights
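# Minimal usage sketch (the .arff path below is hypothetical):
#   acc, relaxation, weights = run_model('data/dataset_normalized.arff', layers=5, folds=5, verbose=False)
#   print(acc, relaxation)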
def coefficients(matrix):
n, m = matrix.shape
temp1 = np.sum(matrix, axis=0)
temp2 = np.sum(matrix**2, axis=0)
    df_data = pd.DataFrame(data=matrix, dtype=float)
import os
from datetime import datetime, timedelta
from http import HTTPStatus
from typing import Any, List, Tuple
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import tinvest as ti
import edhec_risk_kit as erk
class HTTPError(Exception):
pass
class CustomClient(ti.SyncClient):
def request(self, *args, **kwargs) -> Any:
response = super().request(*args, **kwargs)
if response.status_code != HTTPStatus.OK:
raise HTTPError(response.parse_error().json())
return response.parse_json().payload
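# CustomClient.request re-raises any non-200 response as HTTPError (carrying the parsed error body)
# and otherwise returns only the decoded payload instead of the raw response object.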
client = CustomClient(os.getenv('TINVEST_SANDBOX_TOKEN', ''), use_sandbox=True)
api = ti.OpenApi(client)
def main():
markets=api.market.market_stocks_get()
#l = ["ATVI", "KO", "INTC", "LPL", "MAT"]
#l = ["SBER", "TATN", "PHOR", "CHMF", "MGNT", "GCHE", "GAZP", "LKOH", "ROSN"]
l = ["MO", "T", "IRM", "KMI", "SPG", "WMB", "VLO"]
budget=1000
df = dict()
dc = dict()
k=0
for MI in markets.instruments:
if MI.ticker in l:
now = datetime.now()
try:
cndls = api.market.market_candles_get(MI.figi,
from_=now -
timedelta(days=60),
to=now,
interval=ti.CandleResolution.day)
df2 = dict()
cost = 0
for cndl in cndls.candles:
cost = cndl.c
#if MI.currency == MI.currency.usd:
# cost = cost*70
dc[MI.ticker]=cost*MI.lot
df2[str(cndl.time)] = ((cndl.c-cndl.o)/cndl.o)*100
#if cost*MI.lot < 1000:
df[MI.ticker] = df2
except:
pass
k=k+1
    pddf = pd.DataFrame(df)
from sklearn import datasets
import pandas as pd
# %matplotlib inline
ds = datasets.load_breast_cancer();
NC = 4
lFeatures = ds.feature_names[0:NC]
df_orig = pd.DataFrame(ds.data[:,0:NC] , columns=lFeatures)
df_orig['TGT'] = ds.target
df_orig.sample(6, random_state=1960)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=120, random_state = 1960)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_orig[lFeatures].values,
df_orig['TGT'].values,
test_size=0.2,
random_state=1960)
df_train = pd.DataFrame(X_train , columns=lFeatures)
df_train['TGT'] = y_train
df_test = pd.DataFrame(X_test , columns=lFeatures)
df_test['TGT'] = y_test
clf.fit(X_train , y_train)
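# A quick sanity check on the held-out split (not in the original notebook):
#   print(clf.score(X_test, y_test))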
###########################################
from sklearn.linear_model import *
def create_score_stats(df, feature_bins = 4 , score_bins=30):
df_binned = df.copy()
df_binned['Score'] = clf.predict_proba(df[lFeatures].values)[:,0]
df_binned['Score_bin'] = pd.qcut(df_binned['Score'] , score_bins, labels=False, duplicates='drop')
for col in lFeatures:
df_binned[col + '_bin'] = pd.qcut(df[col] , feature_bins, labels=False, duplicates='drop')
binned_features = [col + '_bin' for col in lFeatures]
lInterpolted_Score= pd.Series(index=df_binned.index)
bin_classifiers = {}
coefficients = {}
for b in range(score_bins):
bin_clf = Ridge(random_state = 1960)
bin_indices = (df_binned['Score_bin'] == b)
# print("PER_BIN_INDICES" , b , bin_indices)
bin_data = df_binned[bin_indices]
bin_X = bin_data[binned_features]
bin_y = bin_data['Score']
if(bin_y.shape[0] > 0):
bin_clf.fit(bin_X , bin_y)
bin_classifiers[b] = bin_clf
bin_coefficients = dict(zip(lFeatures, [bin_clf.coef_.ravel()[i] for i in range(len(lFeatures))]))
print("PER_BIN_COEFFICIENTS" , b , bin_coefficients)
coefficients[b] = bin_coefficients
predicted = bin_clf.predict(bin_X)
lInterpolted_Score[bin_indices] = predicted
df_binned['Score_interp'] = lInterpolted_Score
return (df_binned , bin_classifiers , coefficients)
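# For reference, pd.qcut(..., labels=False) assigns equal-frequency bin ids, e.g.
#   pd.qcut(pd.Series([1, 2, 3, 4, 5, 6, 7, 8]), 4, labels=False) -> [0, 0, 1, 1, 2, 2, 3, 3]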
(df_cross_stats , per_bin_classifiers , per_bin_coefficients) = create_score_stats(df_train , feature_bins=20 , score_bins=20)
df_cross_stats.sample(6, random_state=1960)
###########################
df2 = df_cross_stats.sort_values('Score').reset_index()
print(df2.columns)
df2.plot('Score', ['Score_bin'])
df2.plot('Score', ['Score_interp' ])
for col in lFeatures:
df2.plot('Score', [col + '_bin'])
df2.sample(12)
####################################
pd.crosstab(df_cross_stats['mean radius_bin'], df_cross_stats['Score_bin'])
#######################################
for col in lFeatures:
lcoef = df_cross_stats['Score_bin'].apply(lambda x : per_bin_coefficients.get(x).get(col))
lContrib = lcoef * df_cross_stats[col + '_bin']
    df1 = pd.DataFrame()
import argparse
import os
import numpy as np
import pandas as pd
def save_exp(exp_dir):
"""
Stores the rewards and corresponding time-steps for each run (since other parts of the
logs are not used in the final table). Also calculates and store the mean and standard
error over all the repetitions. Change the `key` variable if something other than the
reward needs to be saved.
Args:
exp_dir: The directory where the logs are stored (each repetition will have a
directory inside this directory with `progress.csv` file in it)
"""
key = "policy_reward_mean/receiver"
steps_key = "timesteps_total"
all_scalars = []
for d in os.listdir(exp_dir):
if os.path.isdir(f"{exp_dir}/{d}"):
res_file = f"{exp_dir}/{d}/progress.csv"
res_df = pd.read_csv(res_file)
scalars = res_df[key].to_numpy()
steps = res_df[steps_key].to_numpy()
np.save(f"{exp_dir}/{d}/_scalars", scalars)
np.save(f"{exp_dir}/{d}/_steps", steps)
all_scalars.append(scalars)
    res_df = pd.DataFrame(all_scalars)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
sns.set_theme(style="ticks", color_codes=True)
# In[2]:
#load data
df = pd.read_csv('in-vehicle-coupon-recommendation.csv')
#!/usr/bin/env python3
"""Add domain as nested property to transcript. Same for hugo symbol and exon
info. Output resulting JSON"""
import pandas as pd
import numpy as np
import argparse
def add_nested_hgnc(transcripts):
""" Make nested object HGNC symbols per transcript"""
def get_hgnc_symbol(transcript_id):
hgnc_symbols = transcripts.loc[transcript_id]
if hgnc_symbols.ndim == 1:
            if pd.isnull(hgnc_symbols.hgnc_symbol):
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import re
import bisect
from io import BytesIO
from pathlib import Path
import fire
import requests
import pandas as pd
from lxml import etree
from loguru import logger
NEW_COMPANIES_URL = "http://www.csindex.com.cn/uploads/file/autofile/cons/000300cons.xls"
CSI300_CHANGES_URL = "http://www.csindex.com.cn/zh-CN/search/total?key=%E5%85%B3%E4%BA%8E%E8%B0%83%E6%95%B4%E6%B2%AA%E6%B7%B1300%E5%92%8C%E4%B8%AD%E8%AF%81%E9%A6%99%E6%B8%AF100%E7%AD%89%E6%8C%87%E6%95%B0%E6%A0%B7%E6%9C%AC%E8%82%A1%E7%9A%84%E5%85%AC%E5%91%8A"
CSI300_BENCH_URL = "http://push2his.eastmoney.com/api/qt/stock/kline/get?secid=1.000300&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg=19900101&end=20220101"
CSI300_START_DATE = pd.Timestamp("2005-01-01")
CUR_DIR = Path(__file__).resolve().parent
class CSI300:
REMOVE = "remove"
ADD = "add"
def __init__(self, qlib_dir=None):
"""
Parameters
----------
qlib_dir: str
qlib data dir, default "Path(__file__).parent/qlib_data"
"""
if qlib_dir is None:
qlib_dir = CUR_DIR.joinpath("qlib_data")
self.instruments_dir = Path(qlib_dir).expanduser().resolve().joinpath("instruments")
self.instruments_dir.mkdir(exist_ok=True, parents=True)
self._calendar_list = None
@property
def calendar_list(self) -> list:
"""get history trading date
Returns
-------
"""
# TODO: get calendar from MSN
if self._calendar_list is None:
logger.info("get all trading date")
value_list = requests.get(CSI300_BENCH_URL).json()["data"]["klines"]
self._calendar_list = sorted(map(lambda x: pd.Timestamp(x.split(",")[0]), value_list))
return self._calendar_list
def _get_trading_date_by_shift(self, trading_date: pd.Timestamp, shift=1):
"""get trading date by shift
Parameters
----------
shift : int
shift, default is 1
trading_date : pd.Timestamp
trading date
Returns
-------
"""
left_index = bisect.bisect_left(self.calendar_list, trading_date)
try:
res = self.calendar_list[left_index + shift]
except IndexError:
res = trading_date
return res
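    # bisect.bisect_left returns the left-most insertion point that keeps the list sorted, e.g.
    # bisect.bisect_left([1, 3, 5], 3) == 1 and bisect.bisect_left([1, 3, 5], 4) == 2, so
    # calendar_list[left_index + shift] walks forward (or backward) from the given trading date.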
def _get_changes(self) -> pd.DataFrame:
"""get companies changes
Returns
-------
"""
logger.info("get companies changes......")
res = []
for _url in self._get_change_notices_url():
_df = self._read_change_from_url(_url)
res.append(_df)
logger.info("get companies changes finish")
return pd.concat(res)
@staticmethod
def normalize_symbol(symbol):
symbol = f"{int(symbol):06}"
return f"SH{symbol}" if symbol.startswith("60") else f"SZ{symbol}"
def _read_change_from_url(self, url: str) -> pd.DataFrame:
"""read change from url
Parameters
----------
url : str
change url
Returns
-------
"""
resp = requests.get(url)
_text = resp.text
date_list = re.findall(r"(\d{4}).*?年.*?(\d+).*?月.*?(\d+).*?日", _text)
if len(date_list) >= 2:
add_date = pd.Timestamp("-".join(date_list[0]))
else:
_date = pd.Timestamp("-".join(re.findall(r"(\d{4}).*?年.*?(\d+).*?月", _text)[0]))
add_date = self._get_trading_date_by_shift(_date, shift=0)
remove_date = self._get_trading_date_by_shift(add_date, shift=-1)
logger.info(f"get {add_date} changes")
try:
excel_url = re.findall('.*href="(.*?xls.*?)".*', _text)[0]
_io = BytesIO(requests.get(f"http://www.csindex.com.cn{excel_url}").content)
            df_map = pd.read_excel(_io, sheet_name=None)
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# get's coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
        tm.assert_series_equal(expr, res2l)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = | pd.Timestamp('2011-01-01', tz=tz) | pandas.Timestamp |
import blpapi
import logging
from .BbgRefDataService import BbgRefDataService
import pandas as pd
import numpy as np
from . import BbgLogger
logger = BbgLogger.logger
SECURITY_DATA = blpapi.Name("securityData")
SECURITY = blpapi.Name("security")
FIELD_DATA = blpapi.Name("fieldData")
FIELD_EXCEPTIONS = blpapi.Name("fieldExceptions")
FIELD_ID = blpapi.Name("fieldId")
ERROR_INFO = blpapi.Name("errorInfo")
class BbgDataService(BbgRefDataService):
def __init__(self, field, securities, overrides = None):
'''
        Bloomberg Bulk Reference Data query object. Allows the user to input a list of securities and a single bulk field for retrieval, with the ability to override certain fields (as specified in FLDS <GO>) if required.
Parameters
----------
field : string
            The field to be retrieved; field names and data types can be determined by typing FLDS <GO> and using the search box. If more than one field is provided, a TypeError is raised.
securities : string, tuple, list, or ndarray
List of Bloomberg tickers to retrieve data for. If one item is passed this can be input as a string, otherwise inputs must be passed as a list or array-like.
overrides : dictionary, optional
A dictionary containing key, value pairs of fields and override values to input.
See Also
--------
BbgDataService.constructDf : Constructor method, retrieves data associated with a BbgDataService query object and generates a dataframe from it.
BbgDataPoint : Retrieve single point static, calculated or other reference data.
BbgIntradayTick : Retrieve historic tick-level data for a given security.
BbgIntradayBar : Retrieve historic bar level data for a given security (open, high, low and close) for a specified time interval given in minutes.
Examples
--------
Constructing DataFrame of last and bid prices for ACGB 3Y and 10Y Futures.
>>> import BloombergDataModule as bbg
>>> import pandas as pd
        >>> curveTenorRates = bbg.BbgDataService(field = 'CURVE_TENOR_RATES', securities = ['YCGT0025 Index','YCGT0016 Index', 'YCGT0001 Index'], overrides = {'CURVE_DATE': '20060830'})
>>> curveTenorRates.constructDf().head()
Ask Yield Bid Yield Last Update Mid Yield Tenor Tenor Ticker
BB_TICKER
YCGT0025 Index 5.041 5.051 2006-08-30 5.046 3M 912795YG Govt
YCGT0025 Index 5.126 5.137 2006-08-30 5.132 6M 912795YV Govt
YCGT0025 Index 4.809 4.817 2006-08-30 4.813 2Y 912828FR Govt
YCGT0025 Index 4.737 4.742 2006-08-30 4.740 3Y 912828FP Govt
YCGT0025 Index 4.723 4.727 2006-08-30 4.725 5Y 912828FN Govt
'''
        self.fields = [field] if isinstance(field, str) else list(field)
if len(self.fields) > 1:
raise TypeError("BbgDataService is only designed to handle a single bulk field per request.")
self.securities = securities
self.overrides = overrides
def constructDf(self):
'''
The constructDf method retrieves data associated with a BbgDataService query object and generates a dataframe from it.
Parameters
----------
None
Returns
-------
table : DataFrame
See Also
--------
BbgDataHistory.constructDf : retrieves static history data and constructs a DataFrame from it. It has more customisability with respect to overrides
BbgIntradayTick.constructDf: retrieves intraday (or multi-day) tick level data and constructs a dataframe from it. It has applications in more data intensive and granular analysis
        BbgIntradayBar.constructDf: retrieves intraday (or multi-day) bar level (open-high-low-close) data and constructs a dataframe from it. It is for use in more data intensive and granular analysis. The bar interval frequency can be specified in minutes to optimise for efficiency and speed.
Examples
--------
Constructing DataFrame of last and bid prices for ACGB 3Y and 10Y Futures.
>>> import BloombergDataModule as bbg
>>> import pandas as pd
        >>> curveTenorRates = bbg.BbgDataService(field = 'CURVE_TENOR_RATES', securities = ['YCGT0025 Index','YCGT0016 Index', 'YCGT0001 Index'], overrides = {'CURVE_DATE': '20060830'})
>>> curveTenorRates.constructDf().head()
Ask Yield Bid Yield Last Update Mid Yield Tenor Tenor Ticker
BB_TICKER
YCGT0025 Index 5.041 5.051 2006-08-30 5.046 3M 912795YG Govt
YCGT0025 Index 5.126 5.137 2006-08-30 5.132 6M 912795YV Govt
YCGT0025 Index 4.809 4.817 2006-08-30 4.813 2Y 912828FR Govt
YCGT0025 Index 4.737 4.742 2006-08-30 4.740 3Y 912828FP Govt
YCGT0025 Index 4.723 4.727 2006-08-30 4.725 5Y 912828FN Govt
'''
BbgRefDataService.__init__(self)
self.request = self.createRequest(securities = self.securities, fields = self.fields, requestType = "ReferenceDataRequest")
self.request = self.appendRequestOverrides(request = self.request, overrides = self.overrides)
self.cid = self.session.sendRequest(request = self.request)
self.bbgRefData = pd.DataFrame()
for response in self.parseResponse(self.cid):
self.bbgRefData = self.bbgRefData.append(self.refDataContentToDf(response))
return self.bbgRefData
def refDataContentToDf(self, response):
responseData = response['content']['ReferenceDataResponse']
returnDf = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict, Counter
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import pylcs
import config.constants as constants
from config.constants import DOC_LABELS, SUBTYPE_A, SUBTYPE_B
from corpus.tokenization import get_tokenizer
TOKENIZER = get_tokenizer()
def PRF(df):
df[constants.P] = df[constants.TP]/(df[constants.NP].astype(float))
df[constants.R] = df[constants.TP]/(df[constants.NT].astype(float))
df[constants.F1] = 2*df[constants.P]*df[constants.R]/(df[constants.P] + df[constants.R])
return df
def prf1(df):
df[constants.FN] = df[constants.NT] - df[constants.TP]
df[constants.FP] = df[constants.NP] - df[constants.TP]
df[constants.P] = df[constants.TP].astype(float)/(df[constants.NP].astype(float))
df[constants.R] = df[constants.TP].astype(float)/(df[constants.NT].astype(float))
df[constants.F1] = 2*df[constants.P]*df[constants.R]/(df[constants.P] + df[constants.R])
return df
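# Minimal usage sketch added for illustration; the helper name and the counts below are
# invented, everything else follows the prf1() implementation above.
def _prf1_example():
    # With NT=10 true spans, NP=8 predicted spans and TP=6 matches:
    # P = 6/8 = 0.75, R = 6/10 = 0.60, F1 = 2*P*R/(P+R) ~ 0.667 (FN=4, FP=2).
    df = pd.DataFrame({constants.NT: [10], constants.NP: [8], constants.TP: [6]})
    return prf1(df)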
def has_overlap(a, b):
A = set(range(*a))
B = set(range(*b))
return len(A.intersection(B)) > 0
def entity_indices_exact(t, p):
# indices match?
indices_match = t.indices() == p.indices()
return int(indices_match)
def entity_indices_overlap(t, p):
# indices overlap?
indices_match = has_overlap(t.indices(), p.indices())
return int(indices_match)
def entity_indices_partial(t, p):
# find overlapping character indices
ol_start = max(t.start, p.start)
ol_end = min(t.end, p.end)
ol_n = ol_end - ol_start
# no overlap
if ol_n <= 0:
return 0
# at least one character
else:
crop = lambda text, start, ol_start, ol_n: text[ol_start-start:ol_start-start+ol_n]
t_text = crop(t.text, t.start, ol_start, ol_n)
p_text = crop(p.text, p.start, ol_start, ol_n)
assert t_text == p_text, f'''"{t_text}" VS "{p_text}"'''
tokens = list(TOKENIZER(t_text))
return len(tokens)
#def entity_equiv(t, p, exact=True, subtype=True):
def entity_equiv(t, p, match_type="exact", subtype=True):
# type match?
type_match = t.type_ == p.type_
# sub type mach?
if subtype:
type_match = type_match and (t.subtype == p.subtype)
type_match = int(type_match)
#if exact:
if match_type == "exact":
indices_match = entity_indices_exact(t, p)
elif match_type == "overlap":
indices_match = entity_indices_overlap(t, p)
elif match_type == "partial":
indices_match = entity_indices_partial(t, p)
else:
raise ValueError(f"invalid match_type: {match_type}")
return type_match*indices_match
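# Minimal usage sketch added for illustration: entity_equiv() only needs objects exposing
# type_, subtype, start, end, text and indices(), so the tiny stand-in class and the span
# values below are invented and are not part of the original corpus classes.
def _entity_equiv_example():
    class _Span(object):
        def __init__(self, type_, subtype, start, end, text):
            self.type_, self.subtype = type_, subtype
            self.start, self.end, self.text = start, end, text
        def indices(self):
            return (self.start, self.end)
    t = _Span('Drug', 'opioid', 10, 18, 'fentanyl')
    p = _Span('Drug', 'opioid', 10, 18, 'fentanyl')
    # same type, subtype and character span -> 1; any mismatch would give 0
    return entity_equiv(t, p, match_type="exact", subtype=True)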
def compare_doc_labels(T, P, out_type='DataFrame'):
'''
    Compare document-level labels across a corpus (e.g. a list of documents)
'''
# initialize counters
count_true = Counter()
count_predict = Counter()
count_match = Counter()
# iterate over documents
assert len(T) == len(P)
for t, p in zip(T, P):
for k in t:
count_true[(k, t[k])] += 1
count_predict[(k, p[k])] += 1
if t[k] == p[k]:
count_match[(k, t[k])] += 1
if out_type == "DataFrame":
x = []
keys = set(count_true.keys()).union(set(count_predict.keys()))
for k in keys:
x.append(list(k) + [count_true[k], count_predict[k], count_match[k]])
fields = [constants.TYPE, constants.SUBTYPE]
columns = fields + [constants.NT, constants.NP, constants.TP]
df = pd.DataFrame(x, columns=columns)
df = prf1(df)
df = df.sort_values(fields)
return df
elif out_type == "Counter":
return (count_true, count_predict, count_match)
    else:
        raise ValueError("Invalid output type")
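# Minimal usage sketch added for illustration; the label names and values are invented.
# Each element of T and P holds the document-level labels of one gold/predicted document,
# and the returned DataFrame carries NT/NP/TP plus precision, recall and F1 per label.
def _compare_doc_labels_example():
    T = [{'Severity': 'mild'}, {'Severity': 'severe'}]
    P = [{'Severity': 'mild'}, {'Severity': 'mild'}]
    return compare_doc_labels(T, P, out_type='DataFrame')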
def compare_sent_labels(T, P, out_type='DataFrame'):
'''
    Compare sentence-level labels across a corpus (e.g. a list of documents)
'''
# initialize counters
count_true = Counter()
count_predict = Counter()
count_match = Counter()
# iterate over documents
assert len(T) == len(P)
# iterate over documents
for t, p in zip(T, P):
# iterate over document level types
for doc_label, subtype_labels in t.items():
# iterate over subtype combinations
for (subtype_a, subtype_b) in subtype_labels:
c = (subtype_a, subtype_b)
k = (doc_label, subtype_a, subtype_b)
t_ = t[doc_label][c]
p_ = p[doc_label][c]
count_true[k] += sum(t_)
count_predict[k] += sum(p_)
count_match[k] += sum([int(a == b)*a for a, b in zip(t_, p_)])
if out_type == "DataFrame":
x = []
keys = set(count_true.keys()).union(set(count_predict.keys()))
for k in keys:
x.append(list(k) + [count_true[k], count_predict[k], count_match[k]])
fields = [DOC_LABELS, SUBTYPE_A, SUBTYPE_B]
columns = fields + [constants.NT, constants.NP, constants.TP]
df = pd.DataFrame(x, columns=columns)
df = prf1(df)
df = df.sort_values(fields)
return df
elif out_type == "Counter":
return (count_true, count_predict, count_match)
    else:
        raise ValueError("Invalid output type")
def entity_hist(X, match_type="exact", subtype=True):
counter = Counter()
for x in X:
if subtype:
k = (x.type_, x.subtype)
else:
k = (x.type_,)
if match_type in ["exact", "overlap"]:
c = 1
elif match_type in ["partial"]:
c = len(list(TOKENIZER(x.text)))
else:
raise ValueError("invalid match_type")
counter[k] += c
return counter
#def compare_entities_doc(T, P, exact=True, subtype=True):
def compare_entities_doc(T, P, match_type="exact", subtype=True):
# entity count, truth
count_true = entity_hist(T, match_type=match_type, subtype=subtype)
# entity count, prediction
count_predict = entity_hist(P, match_type=match_type, subtype=subtype)
# entity count, correct
p_found = set([])
count_match = Counter()
for t in T:
for i, p in enumerate(P):
# only count if all are true
match_score = entity_equiv(t, p, match_type=match_type, subtype=subtype)
if (match_score > 0) and (i not in p_found):
if subtype:
k = (t.type_, t.subtype)
else:
k = (t.type_,)
count_match[k] += match_score
p_found.add(i)
break
return (count_true, count_predict, count_match)
#def compare_entities(T, P, exact=True, subtype=True, out_type='DataFrame'):
def compare_entities(T, P, match_type="exact", subtype=True, out_type='DataFrame'):
'''
Compare entities, only requiring overlap for corpus (e.g. list of documents)
'''
# initialize counters
count_true = Counter()
count_predict = Counter()
count_match = Counter()
# iterate over documents
assert len(T) == len(P)
for t, p in zip(T, P):
c_t, c_p, c_m = compare_entities_doc(t, p, match_type=match_type, subtype=subtype)
count_true += c_t
count_predict += c_p
count_match += c_m
if out_type == "DataFrame":
x = []
keys = set(count_true.keys()).union(set(count_predict.keys()))
for k in keys:
x.append(list(k) + [count_true[k], count_predict[k], count_match[k]])
if subtype:
fields = [constants.TYPE, constants.SUBTYPE]
else:
fields = [constants.TYPE]
columns = fields + [constants.NT, constants.NP, constants.TP]
df = | pd.DataFrame(x, columns=columns) | pandas.DataFrame |
import pandas as pd
TRAIN_PATH = 'data/multinli_1.0/multinli_1.0_train.txt'
DEV_PATH = 'data/multinli_1.0/multinli_1.0_dev_matched.txt'
#things get a bit weird here as we use the dev set as the test set
#and make a test set from the train set
train_df = pd.read_csv(TRAIN_PATH, sep='\t', error_bad_lines=False, keep_default_na=False)
test_df = | pd.read_csv(DEV_PATH, sep='\t', keep_default_na=False) | pandas.read_csv |
"""
GIS For Electrification (GISEle)
Developed by the Energy Department of Politecnico di Milano
Supporting Code
Group of supporting functions used throughout the GISEle algorithm
"""
import os
import requests
import pandas as pd
import geopandas as gpd
import numpy as np
import json
import shapely.ops
import iso8601
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
from shapely.geometry import Point, box, LineString, MultiPoint
from shapely.ops import split
from gisele.michele.michele import start
from gisele.data_import import import_pv_data, import_wind_data
from datetime import datetime
def l():
"""Print long separating lines."""
print('-' * 100)
def s():
"""Print short separating lines."""
print("-" * 40)
def nearest(row, df, src_column=None):
"""
    Find the nearest point and return the value from the specified column.
    :param row: Row of the first dataframe (meant to be used with DataFrame.apply)
    :param df: Second dataframe in which the nearest point is searched
:param src_column: Column of the second dataframe that will be returned
:return value: Value of the desired src_column of the second dataframe
"""
# Find the geometry that is closest
nearest_p = df['geometry'] == shapely.ops.nearest_points(row['geometry'],
df.unary_union)[1]
# Get the corresponding value from df2 (matching is based on the geometry)
value = df.loc[nearest_p, src_column].values[0]
return value
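# Minimal usage sketch added for illustration; the coordinates and IDs are invented.
# nearest() is applied row-wise, tagging every point of one geodataframe with a column
# value of the closest point in another geodataframe.
def _nearest_example():
    gdf_a = gpd.GeoDataFrame({'geometry': [Point(0, 0), Point(5, 5)]})
    gdf_b = gpd.GeoDataFrame({'ID': [10, 20],
                              'geometry': [Point(1, 1), Point(6, 6)]})
    gdf_a['nearest_ID'] = gdf_a.apply(nearest, df=gdf_b, src_column='ID', axis=1)
    return gdf_a  # nearest_ID ends up as [10, 20]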
def distance_2d(df1, df2, x, y):
"""
Find the 2D distance matrix between two datasets of points.
:param df1: first point dataframe
:param df2: second point dataframe
:param x: column representing the x coordinates (longitude)
:param y: column representing the y coordinates (latitude)
:return value: 2D Distance matrix between df1 and df2
"""
d1_coordinates = {'x': df1[x], 'y': df1[y]}
df1_loc = pd.DataFrame(data=d1_coordinates)
df1_loc.index = df1['ID']
d2_coordinates = {'x': df2[x], 'y': df2[y]}
df2_loc = pd.DataFrame(data=d2_coordinates)
df2_loc.index = df2['ID']
value = distance_matrix(df1_loc, df2_loc)
return value
def distance_3d(df1, df2, x, y, z):
"""
Find the 3D euclidean distance matrix between two datasets of points.
:param df1: first point dataframe
:param df2: second point dataframe
:param x: column representing the x coordinates (longitude)
:param y: column representing the y coordinates (latitude)
:param z: column representing the z coordinates (Elevation)
:return value: 3D Distance matrix between df1 and df2
"""
d1_coordinates = {'x': df1[x], 'y': df1[y], 'z': df1[z]}
df1_loc = pd.DataFrame(data=d1_coordinates)
df1_loc.index = df1['ID']
d2_coordinates = {'x': df2[x], 'y': df2[y], 'z': df2[z]}
df2_loc = pd.DataFrame(data=d2_coordinates)
df2_loc.index = df2['ID']
value = pd.DataFrame(cdist(df1_loc.values, df2_loc.values, 'euclidean'),
index=df1_loc.index, columns=df2_loc.index)
return value
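# Minimal usage sketch added for illustration; the coordinates are invented. Both helpers
# expect dataframes with an 'ID' column plus the coordinate columns and return the full
# pairwise distance matrix between the two point sets.
def _distance_matrix_example():
    df1 = pd.DataFrame({'ID': [0, 1], 'X': [0.0, 100.0], 'Y': [0.0, 0.0],
                        'Elevation': [10.0, 20.0]})
    df2 = pd.DataFrame({'ID': [2, 3], 'X': [0.0, 100.0], 'Y': [50.0, 50.0],
                        'Elevation': [10.0, 30.0]})
    d2 = distance_2d(df1, df2, 'X', 'Y')               # 2x2 numpy array
    d3 = distance_3d(df1, df2, 'X', 'Y', 'Elevation')  # 2x2 DataFrame indexed by ID
    return d2, d3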
def river_intersection(gdf,resolution):
'''
    Check which candidate lines in the adjacency matrix cross a river and assign
    a very high weight to those lines.
    :param gdf: point geodataframe with the grid of points ('X' and 'Y' columns)
    :param resolution: grid resolution, used to pre-select only short candidate lines
    :return: n x n matrix with a penalty of 100 for lines that cross a river, 0 otherwise
'''
print('begin river intersection')
n = gdf['X'].size
weight_columns_x1 = np.repeat(gdf['X'].values[:,np.newaxis], n,1)
weight_columns_y1 = np.repeat(gdf['Y'].values[:,np.newaxis], n,1)
weight_columns_x2 = np.repeat(gdf['X'].values[np.newaxis,:], n,0)
weight_columns_y2 = np.repeat(gdf['Y'].values[np.newaxis,:], n,0)
weight_columns_x1_res = np.reshape(weight_columns_x1, (n*n, 1), order='F')
weight_columns_x2_res = np.reshape(weight_columns_x2, (n*n, 1), order='F')
weight_columns_y1_res = np.reshape(weight_columns_y1, (n*n, 1), order='F')
weight_columns_y2_res = np.reshape(weight_columns_y2, (n*n, 1), order='F')
df=pd.DataFrame()
df['x1'] = weight_columns_x1_res[:,0]
df['x2'] = weight_columns_x2_res[:,0]
df['y1'] = weight_columns_y1_res[:,0]
df['y2'] = weight_columns_y2_res[:,0]
# todo-> very slow passage, need to speed it up, no sense to compute it each time,
#todo -> take only short lines in a predefined buffer around rivers
#import rivers, intersect them according to the area considered
#create a buffer around them
#filter geodf to take only point that are closer than 1.5*resolution to the river
# it would be better to associate weights in advance,
df['2d_dist'] = ((df['x1']-df['x2'])**2+(df['y1']-df['y2'])**2)**0.5
# select only short lines and create linestrings
df_short_lines = df.loc[(df['2d_dist']<resolution * 1.5) &(df['2d_dist']>0)]
df_short_lines['geometry'] = df.apply(
lambda x: LineString([(x['x1'], x['y1']), (x['x2'], x['y2'])]), axis=1)
geodf = gpd.GeoDataFrame(df_short_lines, geometry='geometry')
# todo -> automatize this step!!!
geodf.crs = 'epsg:22287'
case_study='test_3'
dir='Case studies/'+case_study
# intersect short lines
test_inters = gpd.read_file('C:/Users/silvi/Progetti Github/Gisele_development/Case studies/test_3/Input/rivers.shp')
a = np.empty(shape=(geodf['geometry'].size, test_inters['geometry'].size))
for i, row in test_inters.iterrows():
a[:, i] = geodf['geometry'].intersects(row['geometry'])
a_tot = np.amax(a, 1)
geodf['intersection'] = a_tot
df['a_tot'] = 0
df.loc[geodf.index,'a_tot'] = geodf['intersection']
matrix = df['a_tot'].values.reshape((n, n), order='F') * 100
# df['geometry']=df.apply(lambda x: LineString([(x['x1'], x['y1']),(x['x2'], x['y2']) ]),axis=1)
# geodf= gpd.GeoDataFrame(df,geometry='geometry')
# geodf.crs = 'epsg:32737'
#
# test_inters=gpd.read_file('test_inters.shp')
# a=np.empty(shape =(geodf['geometry'].size,test_inters['geometry'].size))
# for i, row in test_inters.iterrows():
# a[:,i] = geodf['geometry'].intersects(row['geometry'])
# a_tot = np.amax(a, 1)
# geodf['intersection']=a_tot
# matrix=a_tot.reshape((n, n), order='F')*100
return matrix
def river_intersection(graph_gdf,box,graph,rivers):
#todo ->create the rivers geodf
rivers_clipped = gpd.clip(rivers, box) # box needs to be gdf with same crs as rivers
graph_gdf.loc[graph_gdf[rivers_clipped['geometry'].intersects(graph_gdf['geometry'])],'Cost'] = \
graph_gdf.loc[graph_gdf[rivers_clipped['geometry'].intersects(graph_gdf['geometry'])],'Cost']*5
graph_gdf['inters'] =''
for i, row in graph_gdf.iterrows():
graph_gdf.loc[i,'inters'] = rivers_clipped['geometry'].intersects(row['geometry'])
graph_gdf.loc[graph_gdf['inters']==True,'Cost'] = graph_gdf.loc[graph_gdf['inters']==True,'Cost']*10
graph_intersect =graph_gdf[graph_gdf['inters']==True]
for i,row in graph_intersect.iterrows():
graph[row['ID1']][row['ID2']]['weight'] = row['Cost']
return (graph,graph_gdf)
def cost_matrix(gdf, dist_3d_matrix, line_bc,resolution,Rivers_option):
"""
Creates the cost matrix in €/km by finding the average weight between
two points and then multiplying by the distance and the line base cost.
:param gdf: Geodataframe being analyzed
:param dist_3d_matrix: 3D distance matrix of all points [meters]
:param line_bc: line base cost for line deployment [€/km]
:return value: Cost matrix of all the points present in the gdf [€]
"""
# Altitude distance in meters
weight = gdf['Weight'].values
n = gdf['X'].size
weight_columns = np.repeat(weight[:, np.newaxis], n, 1)
weight_rows = np.repeat(weight[np.newaxis, :], n, 0)
if Rivers_option:
river_inters =river_intersection(gdf,resolution)
total_weight = (weight_columns + weight_rows) / 2 + river_inters
else:
total_weight = (weight_columns + weight_rows) / 2
# 3D distance
value = (dist_3d_matrix * total_weight) * line_bc / 1000
return value
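# Minimal worked sketch added for illustration; the weights, spacing and base cost are
# invented. With Rivers_option disabled the cost between two nodes reduces to
# mean(weight_i, weight_j) * 3D distance [m] * line_bc [€/km] / 1000.
def _cost_matrix_example():
    gdf = pd.DataFrame({'ID': [0, 1], 'X': [0.0, 500.0], 'Y': [0.0, 0.0],
                        'Elevation': [0.0, 0.0], 'Weight': [1.0, 3.0]})
    dist_3d = distance_3d(gdf, gdf, 'X', 'Y', 'Elevation')
    # off-diagonal entries: ((1 + 3) / 2) * 500 * 10000 / 1000 = 10000
    return cost_matrix(gdf, dist_3d, line_bc=10000, resolution=500, Rivers_option=False)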
def line_to_points(line, df):
"""
    Finds all the points of a linestring geodataframe corresponding to a
point geodataframe.
:param line: Linestring geodataframe being analyzed
:param df: Point geodataframe where the linestring nodes will be referenced
:return nodes_in_df: Point geodataframe containing all nodes of linestring
"""
nodes = list(line.ID1.astype(int)) + list(line.ID2.astype(int))
nodes = list(dict.fromkeys(nodes))
nodes_in_df = gpd.GeoDataFrame(crs=df.crs, geometry=[])
for i in nodes:
nodes_in_df = nodes_in_df.append(df[df['ID'] == i], sort=False)
nodes_in_df.reset_index(drop=True, inplace=True)
return nodes_in_df
def create_roads(gdf_roads, geo_df):
'''
Creates two geodataframes
:param gdf_roads: geodataframe with all roads in the area, as imported from OSM
:param geo_df: geodataframe with the grid of points
    :return: line_gdf: point geodataframe containing the vertices of the roads (in the whole area)
             segments: geodataframe containing all the road segments (in the whole area)
    The GeoDataFrames can also be saved as shapefiles.
'''
#w = geo_df.shape[0]
if not geo_df.empty: #in case we are just processing the roads without pre-existing grid of points
w = int(geo_df['ID'].max())+1 # this is because not all road points are included in the weighted grid of points. Basically,
#it could be 10500,10501 and then 10504. df.shape[0] will give us a bad starting point in this case-> we want to start from 10505
else:
w=0
line_vertices = pd.DataFrame(
index=pd.Series(range(w, w + len(gdf_roads.index))),
columns=['ID', 'X', 'Y', 'ID_line', 'Weight', 'Elevation'], dtype=int)
# create geodataframe with all the segments that compose the road
segments = gpd.GeoDataFrame(columns=['geometry', 'ID1', 'ID2'])
k = 0
gdf_roads.reset_index(inplace=True)
x = 0
for i, row in gdf_roads.iterrows():
for j in list(row['geometry'].coords):
line_vertices.loc[w, 'X'] = j[0]
line_vertices.loc[w, 'Y'] = j[1]
line_vertices.loc[w, 'ID_line'] = k
line_vertices.loc[w, 'ID'] = w
line_vertices.loc[w, 'Weight'] = 1
w = w + 1
k = k + 1
points_to_split = MultiPoint(
[Point(x, y) for x, y in row['geometry'].coords[1:]])
splitted = split(row['geometry'], points_to_split)
for j in splitted:
segments.loc[x, 'geometry'] = j
segments.loc[x, 'length'] = segments.loc[
x, 'geometry'].length / 1000
segments.loc[x, 'ID1'] = line_vertices[
(line_vertices['X'] == j.coords[0][0]) & (
line_vertices['Y'] == j.coords[0][1])][
'ID'].values[0]
segments.loc[x, 'ID2'] = line_vertices[
(line_vertices['X'] == j.coords[1][0]) & (
line_vertices['Y'] == j.coords[1][1])][
'ID'].values[0]
x = x + 1
print('\r' + str(i) + '/' + str(gdf_roads.index.__len__()),
sep=' ', end='', flush=True)
if not geo_df.empty:
line_vertices.loc[:, 'Elevation'] = geo_df[geo_df['Elevation']>0].Elevation.mean()
else:
line_vertices.loc[:, 'Elevation']=1000
# line_vertices.loc[:, 'Elevation'] = 300
geometry = [Point(xy) for xy in
zip(line_vertices['X'], line_vertices['Y'])]
line_gdf = gpd.GeoDataFrame(line_vertices, crs=geo_df.crs,
geometry=geometry)
#line_gdf.to_file('Output/Datasets/Roads/gdf_roads.shp')
#segments.to_file('Output/Datasets/Roads/roads_segments.shp')
#segments.crs=22287
# line_gdf.to_file('Testing_strategy/Points.shp')
# segments.to_file('Testing_strategy/lines.shp')
return line_gdf, segments
def create_roads2(gdf_roads, geo_df,crs):
'''
Creates two geodataframes
:param gdf_roads: geodataframe with all roads in the area, as imported from OSM
:param geo_df: geodataframe with the grid of points
    :return: line_gdf: point geodataframe containing the vertices of the roads (in the whole area)
             segments: geodataframe containing all the road segments (in the whole area)
    The GeoDataFrames can also be saved as shapefiles.
'''
#w = geo_df.shape[0]
if not geo_df.empty: #in case we are just processing the roads without pre-existing grid of points
w = int(geo_df['ID'].max())+1 # this is because not all road points are included in the weighted grid of points. Basically,
#it could be 10500,10501 and then 10504. df.shape[0] will give us a bad starting point in this case-> we want to start from 10505
else:
w=0
line_vertices = pd.DataFrame(
index=pd.Series(range(w, w + len(gdf_roads.index))),
columns=['ID', 'X', 'Y', 'ID_line', 'Weight', 'Elevation'], dtype=int)
# create geodataframe with all the segments that compose the road
segments = gpd.GeoDataFrame(columns=['geometry', 'ID1', 'ID2'])
k = 0
gdf_roads.reset_index(inplace=True)
x = 0
for i, row in gdf_roads.iterrows():
for j in list(row['geometry'].coords):
if not (j[0] in line_vertices['X'].to_list() and j[1] in line_vertices['Y'].to_list()):
line_vertices.loc[w, 'X'] = j[0]
line_vertices.loc[w, 'Y'] = j[1]
line_vertices.loc[w, 'ID_line'] = k
line_vertices.loc[w, 'ID'] = w
line_vertices.loc[w, 'Weight'] = 1
w = w + 1
else:
pass
#print('Double road point!')
k = k + 1
points_to_split = MultiPoint(
[Point(x, y) for x, y in row['geometry'].coords[1:]])
splitted = split(row['geometry'], points_to_split)
for j in splitted:
segments.loc[x, 'geometry'] = j
segments.loc[x, 'length'] = segments.loc[
x, 'geometry'].length / 1000
segments.loc[x, 'ID1'] = line_vertices[
(line_vertices['X'] == j.coords[0][0]) & (
line_vertices['Y'] == j.coords[0][1])][
'ID'].values[0]
segments.loc[x, 'ID2'] = line_vertices[
(line_vertices['X'] == j.coords[1][0]) & (
line_vertices['Y'] == j.coords[1][1])][
'ID'].values[0]
x = x + 1
print('\r' + str(i) + '/' + str(gdf_roads.index.__len__()),
sep=' ', end='', flush=True)
if not geo_df.empty:
line_vertices.loc[:, 'Elevation'] = geo_df[geo_df['Elevation']>0].Elevation.mean()
else:
line_vertices.loc[:, 'Elevation']=1000
# line_vertices.loc[:, 'Elevation'] = 300
geometry = [Point(xy) for xy in
zip(line_vertices['X'], line_vertices['Y'])]
line_gdf = gpd.GeoDataFrame(line_vertices, crs=crs,
geometry=geometry)
#line_gdf.to_file('Output/Datasets/Roads/gdf_roads.shp')
#segments.to_file('Output/Datasets/Roads/roads_segments.shp')
#segments.crs=22287
# line_gdf.to_file('Testing_strategy/Points.shp')
# segments.to_file('Testing_strategy/lines.shp')
return line_gdf, segments
def create_box(limits, df):
"""
Creates a delimiting box around a geodataframe.
:param limits: Linestring geodataframe being analyzed
:param df: Point geodataframe to be delimited
:return df_box: All points of df that are inside the delimited box
"""
x_min = min(limits.X)
x_max = max(limits.X)
y_min = min(limits.Y)
y_max = max(limits.Y)
dist = Point(x_min, y_min).distance(Point(x_max, y_max))
if dist < 5000:
extension = dist
elif dist < 15000:
extension = dist * 0.6
else:
extension = dist / 4
bubble = box(minx=x_min - extension, maxx=x_max + extension,
miny=y_min - extension, maxy=y_max + extension)
df_box = df[df.within(bubble)]
df_box.index = pd.Series(range(0, len(df_box.index)))
return df_box
def create_box(limits, df,resolution):
"""
Creates a delimiting box around a geodataframe.
:param limits: Linestring geodataframe being analyzed
:param df: Point geodataframe to be delimited
:return df_box: All points of df that are inside the delimited box
"""
x_min = min(limits.X)
x_max = max(limits.X)
y_min = min(limits.Y)
y_max = max(limits.Y)
dist = Point(x_min, y_min).distance(Point(x_max, y_max))
if dist < 5*resolution:
extension = dist
elif dist < 15*resolution:
extension = dist * 0.6
else:
extension = dist / 4
bubble = box(minx=x_min - extension, maxx=x_max + extension,
miny=y_min - extension, maxy=y_max + extension)
df_box = df[df.within(bubble)]
df_box.index = pd.Series(range(0, len(df_box.index)))
return df_box
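# Minimal usage sketch added for illustration; the coordinates and resolution are invented.
# create_box() pads the bounding box of the route nodes and keeps only the grid points
# that fall inside it.
def _create_box_example():
    limits = pd.DataFrame({'X': [0.0, 2000.0], 'Y': [0.0, 2000.0]})
    grid = gpd.GeoDataFrame({'ID': [0, 1, 2]},
                            geometry=[Point(100, 100), Point(1500, 1800),
                                      Point(9000, 9000)])
    # the third point lies far outside the padded box and is dropped
    return create_box(limits, grid, 1000)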
def edges_to_line(path, df, edges_matrix):
"""
Transforms a list of NetworkX graph edges into a linestring geodataframe
based on a input point geodataframe
:param path: NetworkX graph edges sequence
:param df: Point geodataframe to be used as reference
:param edges_matrix: Matrix containing the cost to connect a pair of points
:return line: Linestring geodataframe containing point IDs and its cost
:return line_points: All points of df that are part of the linestring
"""
steps = len(path)
line = gpd.GeoDataFrame(index=range(0, steps),
columns=['ID1', 'ID2', 'Cost', 'geometry'],
crs=df.crs)
line_points = []
for h in range(0, steps):
line.at[h, 'geometry'] = LineString(
[(df.loc[path[h][0], 'X'],
df.loc[path[h][0], 'Y']),
(df.loc[path[h][1], 'X'],
df.loc[path[h][1], 'Y'])])
# int here is necessary to use the command .to_file
line.at[h, 'ID1'] = int(df.loc[path[h][0], 'ID'])
line.at[h, 'ID2'] = int(df.loc[path[h][1], 'ID'])
line.at[h, 'Cost'] = int(edges_matrix.loc[df.loc[path[h][0], 'ID'],
df.loc[path[h][1], 'ID']])
line_points.append(list(df.loc[path[h], 'ID']))
line.drop(line[line['Cost'] == 0].index, inplace=True)
line.Cost = line.Cost.astype(int)
return line, line_points
def load(clusters_list, grid_lifetime, input_profile,gisele_folder, case_study):
"""
Reads the input daily load profile from the input csv. Reads the number of
years of the project and the demand growth from the data.dat file of
    Michele. Then it multiplies the load profile by the clusters' peak load
    and appends values to create a yearly profile composed of 12 representative
    days.
:param grid_lifetime: Number of years the grid will operate
:param clusters_list: List of clusters ID numbers
:return load_profile: Cluster load profile for the whole period
:return years: Number of years the microgrid will operate
:return total_energy: Energy provided by the grid in its lifetime [kWh]
"""
l()
print("5. Microgrid Sizing")
l()
case_folder = gisele_folder + '/Case studies/' + case_study
data_michele = pd.read_table(gisele_folder+"/gisele/michele/Inputs/data.dat", sep="=",
header=None)
print("Creating load profile for each cluster..")
daily_profile = pd.DataFrame(index=range(1, 25),
columns=clusters_list.Cluster)
for column in daily_profile:
daily_profile.loc[:, column] = \
(input_profile.loc[:, 'Hourly Factor']
* float(clusters_list.loc[clusters_list['Cluster']==column, 'Load [kW]'])).values
rep_days = int(data_michele.loc[0, 1].split(';')[0])
grid_energy = daily_profile.append([daily_profile] * 364,
ignore_index=True)
# append 11 times since we are using 12 representative days in a year
load_profile = daily_profile.append([daily_profile] * (rep_days - 1),
ignore_index=True)
years = int(data_michele.loc[1, 1].split(';')[0])
demand_growth = float(data_michele.loc[87, 1].split(';')[0])
daily_profile_new = daily_profile
# appending for all the years considering demand growth
for i in range(grid_lifetime - 1):
daily_profile_new = daily_profile_new.multiply(1 + demand_growth)
if i < (years - 1):
load_profile = load_profile.append([daily_profile_new] * rep_days,
ignore_index=True)
grid_energy = grid_energy.append([daily_profile_new] * 365,
ignore_index=True)
total_energy = pd.DataFrame(index=clusters_list.Cluster,
columns=['Energy'])
for cluster in clusters_list.Cluster:
total_energy.loc[cluster, 'Energy'] = \
grid_energy.loc[:, cluster].sum().round(2)
print("Load profile created")
total_energy.to_csv(case_folder +'/Intermediate/Microgrid/Grid_energy.csv')
return load_profile, years, total_energy
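# Minimal sketch added for illustration; the profile values, growth rate and lifetime are
# invented. The year-on-year scaling applied inside load() is a simple compound growth,
# i.e. profile in year i = daily profile * (1 + demand_growth) ** i.
def _demand_growth_example():
    daily_profile = pd.Series([0.5, 1.0, 0.8])  # kW for three sample hours
    demand_growth = 0.02
    grid_lifetime = 3
    yearly = [daily_profile * (1 + demand_growth) ** year
              for year in range(grid_lifetime)]
    return pd.concat(yearly, ignore_index=True)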
def shift_timezone(df, shift):
"""
Move the values of a dataframe with DateTimeIndex to another UTC zone,
adding or removing hours.
:param df: Dataframe to be analyzed
:param shift: Amount of hours to be shifted
:return df: Input dataframe with values shifted in time
"""
if shift > 0:
add_hours = df.tail(shift)
df = | pd.concat([add_hours, df], ignore_index=True) | pandas.concat |
"""
Generate all plots for the pipeline. For biotype specific plots, all plots are generated as a multi page PDF. There
is a plot for each biotype on its own, and one for the combined results.
"""
import json
import matplotlib
import logging
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.use('Agg')
import itertools
import warnings
from collections import OrderedDict
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
import numpy as np
import pandas as pd
import tools.psl
import tools.sqlInterface
import tools.nameConversions
logger = logging.getLogger(__name__)
# suppress all warnings to make logging cleaner. The only warnings should be the chained assignment warning from pandas
# as well as the bottom == top when plots have no data.
warnings.filterwarnings('ignore')
bar_width = 0.45
boxplot_saturation = 0.7
def generate_plots(args):
"""
Generates the plots.
:param args:
:return:
"""
tm_data = OrderedDict([[genome, json.load(open(tgt))] for genome, tgt in args.tm_jsons.iteritems()])
consensus_data = OrderedDict([[genome, json.load(open(tgt))] for genome, tgt in args.metrics_jsons.iteritems()])
tm_metrics = load_tm_metrics(args.dbs)
transcript_biotype_map = tools.sqlInterface.get_transcript_biotype_map(args.annotation_db)
gene_biotype_map = tools.sqlInterface.get_gene_biotype_map(args.annotation_db)
biotypes = sorted(tools.sqlInterface.get_transcript_biotypes(args.annotation_db))
args.ordered_genomes = list(args.ordered_genomes) # weird bug in pandas
# hack to bring coding to the top
try:
biotypes.insert(0, biotypes.pop(biotypes.index('protein_coding')))
except ValueError:
pass
tx_modes_plot(consensus_data, args.ordered_genomes, args.tx_modes)
tm_metrics_plot(tm_metrics, args.ordered_genomes, biotypes, transcript_biotype_map, args.tm_coverage,
args.tm_identity)
tm_para_plot(tm_data, args.ordered_genomes, biotypes, args.paralogy, args.unfiltered_paralogy)
tm_gene_family_plot(tm_data, args.ordered_genomes, biotypes, args.gene_collapse)
consensus_metrics_plot(consensus_data, args.ordered_genomes, biotypes, args.coverage, args.identity)
missing_rate_plot(consensus_data, args.ordered_genomes, biotypes, args.missing)
consensus_support_plot(consensus_data, args.ordered_genomes, biotypes,
modes=['Splice Annotation Support', 'Exon Annotation Support', 'Original Introns'],
title='Reference annotation support',
tgt=args.consensus_annot_support)
consensus_support_plot(consensus_data, args.ordered_genomes, biotypes,
modes=['Splice Support', 'Exon Support'],
title='Extrinsic support',
tgt=args.consensus_extrinsic_support)
completeness_plot(consensus_data, args.ordered_genomes, biotypes, args.completeness, gene_biotype_map,
transcript_biotype_map)
indel_plot(consensus_data, args.ordered_genomes, args.indel)
if 'denovo' in args:
denovo_plot(consensus_data, args.ordered_genomes, args.denovo)
if 'split_genes' in args:
split_genes_plot(tm_data, args.ordered_genomes, args.split_genes)
if 'pb_support' in args:
pb_support_plot(consensus_data, args.ordered_genomes, args.pb_genomes, args.pb_support)
if 'improvement' in args:
improvement_plot(consensus_data, args.ordered_genomes, args.improvement)
###
# Load metrics from transMap PSLs
###
def load_tm_metrics(dbs):
"""Loads transMap data from PSLs"""
tm_metrics = {'transMap Coverage': OrderedDict(), 'transMap Identity': OrderedDict()}
tm_name_map = {'TransMapCoverage': 'transMap Coverage', 'TransMapIdentity': 'transMap Identity'}
for genome, db_path in dbs.iteritems():
session = tools.sqlInterface.start_session(db_path)
table = tools.sqlInterface.TmEval
for classifier in ['TransMapCoverage', 'TransMapIdentity']:
query = session.query(table.AlignmentId, table.value).filter(table.classifier == classifier)
tm_metrics[tm_name_map[classifier]][genome] = dict(query.all())
return tm_metrics
###
# Plots
###
def tm_metrics_plot(tm_metrics, ordered_genomes, biotypes, transcript_biotype_map, tm_coverage_tgt, tm_identity_tgt):
"""plots for transMap coverage, identity"""
tm_iter = zip(*[['transMap Coverage', 'transMap Identity'],
[tm_coverage_tgt, tm_identity_tgt]])
for mode, tgt in tm_iter:
df = dict_to_df_with_biotype(tm_metrics[mode], transcript_biotype_map)
df = pd.melt(df, id_vars='biotype', value_vars=ordered_genomes).dropna()
df.columns = ['biotype', 'genome', mode]
cov_ident_plot(biotypes, ordered_genomes, mode, tgt, df, x=mode, y='genome')
def consensus_metrics_plot(consensus_data, ordered_genomes, biotypes, coverage_tgt, identity_tgt):
"""plots for consensus coverage, identity, score"""
cons_iter = zip(*[['Coverage', 'Identity'],
[coverage_tgt, identity_tgt]])
for mode, tgt in cons_iter:
df = json_to_df_with_biotype(consensus_data, mode)
cov_ident_plot(biotypes, ordered_genomes, mode, tgt, df, x=mode, y='genome')
def consensus_support_plot(consensus_data, ordered_genomes, biotypes, modes, title, tgt):
"""grouped violin plots of original intron / intron annotation / exon annotation support"""
def adjust_plot(g, this_title):
g.set_xticklabels(rotation=90)
g.fig.suptitle(this_title)
g.fig.subplots_adjust(top=0.9)
for ax in g.axes.flat:
ax.set_ylabel('Percent supported')
ax.set_ylim(-1, 101)
dfs = []
for i, mode in enumerate(modes):
df = json_to_df_with_biotype(consensus_data, mode)
if i > 0:
df = df[mode]
dfs.append(df)
df = pd.concat(dfs, axis=1)
df = pd.melt(df, value_vars=modes, id_vars=['genome', 'biotype'])
with tgt.open('w') as outf, PdfPages(outf) as pdf:
if len(ordered_genomes) > 1:
g = sns.factorplot(data=df, y='value', x='genome', col='variable', col_wrap=2, kind='violin', sharex=True,
sharey=True, row_order=ordered_genomes, cut=0)
else:
g = sns.factorplot(data=df, y='value', x='variable', kind='violin', sharex=True,
sharey=True, row_order=ordered_genomes, cut=0)
adjust_plot(g, title)
multipage_close(pdf, tight_layout=False)
title += ' for {}'
for biotype in biotypes:
this_title = title.format(biotype)
biotype_df = biotype_filter(df, biotype)
if biotype_df is not None:
if len(ordered_genomes) > 1:
g = sns.factorplot(data=biotype_df, y='value', x='genome', col='variable', col_wrap=2,
kind='violin', sharex=True, sharey=True, row_order=ordered_genomes, cut=0)
else:
g = sns.factorplot(data=df, y='value', x='variable', kind='violin', sharex=True,
sharey=True, row_order=ordered_genomes, cut=0)
adjust_plot(g, this_title)
multipage_close(pdf, tight_layout=False)
def tm_para_plot(tm_data, ordered_genomes, biotypes, para_tgt, unfiltered_para_tgt):
"""transMap paralogy plots"""
for key, tgt in [['Paralogy', para_tgt], ['UnfilteredParalogy', unfiltered_para_tgt]]:
legend_labels = ['= 1', '= 2', '= 3', u'\u2265 4']
title_string = 'Proportion of transcripts that have multiple alignments'
biotype_title_string = 'Proportion of {} transcripts that have multiple alignments'
df = json_biotype_nested_counter_to_df(tm_data, key)
# we want a dataframe where each row is the counts, in genome order
# we construct the transpose first
r = []
df[key] = pd.to_numeric(df[key])
# make sure genomes are in order
df['genome'] = pd.Categorical(df['genome'], ordered_genomes, ordered=True)
df = df.sort_values('genome')
for biotype, biotype_df in df.groupby('biotype'):
for genome, genome_df in biotype_df.groupby('genome'):
high_para = genome_df[genome_df[key] >= 4]['count'].sum()
counts = dict(zip(genome_df[key], genome_df['count']))
r.append([biotype, genome, counts.get(1, 0), counts.get(2, 0), counts.get(3, 0), high_para])
df = pd.DataFrame(r, columns=['biotype', 'genome', '1', '2', '3', u'\u2265 4'])
sum_df = df.groupby('genome', sort=False).aggregate(sum).T
plot_fn = generic_unstacked_barplot if len(df.columns) <= 5 else generic_stacked_barplot
box_label = 'Number of\nalignments'
with tgt.open('w') as outf, PdfPages(outf) as pdf:
plot_fn(sum_df, pdf, title_string, legend_labels, 'Number of transcripts', ordered_genomes, box_label)
for biotype in biotypes:
biotype_df = biotype_filter(df, biotype)
if biotype_df is not None:
biotype_df = biotype_df.drop(['genome', 'biotype'], axis=1).T
title_string = biotype_title_string.format(biotype)
plot_fn(biotype_df, pdf, title_string, legend_labels, 'Number of transcripts', ordered_genomes,
box_label)
def tm_gene_family_plot(tm_data, ordered_genomes, biotypes, gene_family_tgt):
"""transMap gene family collapse plots."""
try:
df = json_biotype_nested_counter_to_df(tm_data, 'Gene Family Collapse')
except ValueError: # no gene family collapse. probably the test set.
with gene_family_tgt.open('w') as outf:
pass
return
df['Gene Family Collapse'] = pd.to_numeric(df['Gene Family Collapse'])
tot_df = df[['Gene Family Collapse', 'genome', 'count']].\
groupby(['genome', 'Gene Family Collapse']).aggregate(sum).reset_index()
tot_df = tot_df.sort_values('Gene Family Collapse')
with gene_family_tgt.open('w') as outf, PdfPages(outf) as pdf:
g = sns.factorplot(y='count', col='genome', x='Gene Family Collapse', data=tot_df, kind='bar',
col_order=ordered_genomes, col_wrap=4)
g.fig.suptitle('Number of genes collapsed during gene family collapse')
g.set_xlabels('Number of genes collapsed to one locus')
g.set_ylabels('Number of genes')
g.fig.subplots_adjust(top=0.9)
multipage_close(pdf, tight_layout=False)
for biotype in biotypes:
biotype_df = biotype_filter(df, biotype)
if biotype_df is None:
continue
biotype_df = biotype_df.sort_values('Gene Family Collapse')
g = sns.factorplot(y='count', col='genome', x='Gene Family Collapse', data=biotype_df, kind='bar',
col_order=[x for x in ordered_genomes if x in set(biotype_df.genome)], col_wrap=4)
g.fig.suptitle('Number of genes collapsed during gene family collapse for {}'.format(biotype))
g.set_xlabels('Number of genes collapsed to one locus')
g.set_ylabels('Number of genes')
g.fig.subplots_adjust(top=0.9)
multipage_close(pdf, tight_layout=False)
def missing_rate_plot(consensus_data, ordered_genomes, biotypes, missing_plot_tgt):
"""Missing genes/transcripts"""
base_title = 'Number of missing orthologs in consensus set'
gene_missing_df = json_biotype_counter_to_df(consensus_data, 'Gene Missing')
gene_missing_df.columns = ['biotype', 'Genes', 'genome']
transcript_missing_df = json_biotype_counter_to_df(consensus_data, 'Transcript Missing')
transcript_missing_df.columns = ['biotype', 'Transcripts', 'genome']
df = transcript_missing_df.merge(gene_missing_df, on=['genome', 'biotype'])
df = pd.melt(df, id_vars=['biotype', 'genome'])
ylabel = 'Number of genes or transcripts'
with missing_plot_tgt.open('w') as outf, PdfPages(outf) as pdf:
tot_df = df.groupby(['genome', 'biotype', 'variable']).aggregate(sum).reset_index()
generic_barplot(tot_df, pdf, '', ylabel, base_title, x='genome', y='value',
col='variable', row_order=ordered_genomes)
for biotype in biotypes:
biotype_df = biotype_filter(df, biotype)
if biotype_df is None:
continue
biotype_df = biotype_df.groupby(['genome', 'variable']).aggregate(sum).reset_index()
title = base_title + ' for biotype {}'.format(biotype)
generic_barplot(biotype_df, pdf, '', ylabel, title, x='genome', y='value',
col='variable', row_order=ordered_genomes)
def tx_modes_plot(consensus_data, ordered_genomes, tx_mode_plot_tgt):
ordered_groups = ['transMap', 'transMap+TM', 'transMap+TMR', 'transMap+TM+TMR', 'TM', 'TMR', 'TM+TMR', 'CGP', 'PB',
'Other']
ordered_groups = OrderedDict([[frozenset(x.split('+')), x] for x in ordered_groups])
def split_fn(s):
return ordered_groups.get(frozenset(s['Transcript Modes'].replace('aug', '').split(',')), 'Other')
modes_df = json_biotype_counter_to_df(consensus_data, 'Transcript Modes')
df = modes_df.pivot(index='genome', columns='Transcript Modes').transpose().reset_index()
df['Modes'] = df.apply(split_fn, axis=1)
df = df[['Modes'] + ordered_genomes]
    ordered_values = [x for x in ordered_groups.values() if x in set(df['Modes'])]
with tx_mode_plot_tgt.open('w') as outf, PdfPages(outf) as pdf:
title_string = 'Transcript modes in protein coding consensus gene set'
ylabel = 'Number of transcripts'
if len(ordered_genomes) > 1:
df['Ordered Modes'] = pd.Categorical(df['Modes'], ordered_values, ordered=True)
df = df.sort_values('Ordered Modes')
df = df[['Ordered Modes'] + ordered_genomes].set_index('Ordered Modes')
df = df.fillna(0)
generic_stacked_barplot(df, pdf, title_string, df.index, ylabel, ordered_genomes, 'Transcript mode(s)',
bbox_to_anchor=(1.25, 0.7))
else:
generic_barplot(pd.melt(df, id_vars='Modes'), pdf, 'Transcript mode(s)', ylabel, title_string, x='Modes',
y='value', order=ordered_values)
def denovo_plot(consensus_data, ordered_genomes, denovo_tgt):
with denovo_tgt.open('w') as outf, PdfPages(outf) as pdf:
try:
df = json_biotype_nested_counter_to_df(consensus_data, 'denovo')
except ValueError:
# No de novo results. Probably the test set.
return
# fix column names because json_biotype_nested_counter_to_df makes assumptions
df.columns = ['Result', 'Number of transcripts', 'Augustus mode', 'genome']
has_pb = len(set(df['Augustus mode'])) == 2
if len(set(df.genome)) > 1: # if we ran in PB only, we may not have multiple genomes
if has_pb is True:
ax = sns.factorplot(data=df, x='genome', y='Number of transcripts', kind='bar', col='Result',
hue='Augustus mode', col_wrap=2, row_order=ordered_genomes, sharex=True,
sharey=False)
else:
ax = sns.factorplot(data=df, x='genome', y='Number of transcripts', kind='bar', col='Result',
col_wrap=2, row_order=ordered_genomes, sharex=True, sharey=False)
else:
if has_pb is True:
ax = sns.factorplot(data=df, x='Result', y='Number of transcripts', kind='bar', hue='Augustus mode')
else:
ax = sns.factorplot(data=df, x='Result', y='Number of transcripts', kind='bar')
ax.set_xticklabels(rotation=90)
ax.fig.suptitle('Incorporation of de-novo predictions')
ax.fig.subplots_adjust(top=0.9)
multipage_close(pdf, tight_layout=False)
def split_genes_plot(tm_data, ordered_genomes, split_plot_tgt):
with split_plot_tgt.open('w') as outf, PdfPages(outf) as pdf:
df = json_biotype_counter_to_df(tm_data, 'Split Genes')
df.columns = ['category', 'count', 'genome']
title = 'Split genes'
if len(ordered_genomes) > 1:
g = generic_barplot(pdf=pdf, data=df, x='genome', y='count', col='category', xlabel='', col_wrap=2,
sharey=False, ylabel='Number of transcripts or genes', row_order=ordered_genomes,
title=title)
else:
g = generic_barplot(pdf=pdf, data=df, x='category', y='count', ylabel='Number of transcripts or genes',
title=title, xlabel='Category')
def pb_support_plot(consensus_data, ordered_genomes, pb_genomes, pb_support_tgt):
with pb_support_tgt.open('w') as outf, PdfPages(outf) as pdf:
pb_genomes = [x for x in ordered_genomes if x in pb_genomes] # fix order
df = json_biotype_counter_to_df(consensus_data, 'IsoSeq Transcript Validation')
if len(df) == 0:
# no support information
return
df.columns = ['IsoSeq Transcript Validation', 'Number of transcripts', 'genome']
ax = sns.factorplot(data=df, x='genome', y='Number of transcripts', hue='IsoSeq Transcript Validation',
kind='bar', row_order=pb_genomes)
ax.set_xticklabels(rotation=90)
ax.fig.suptitle('Isoforms validated by at least one IsoSeq read')
multipage_close(pdf, tight_layout=False)
def completeness_plot(consensus_data, ordered_genomes, biotypes, completeness_plot_tgt, gene_biotype_map,
transcript_biotype_map):
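    # adjust_plot caps each axis at the total possible count (all genes or all transcripts of the
    # relevant biotype) and draws a dashed red top spine at that ceiling, so the bars read as
    # "observed out of possible".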
def adjust_plot(g, gene_count, tx_count):
        for ax, c in zip(g.axes[0], [gene_count, tx_count]):
_ = ax.set_ylim(0, c)
ax.spines['top'].set_edgecolor('#e74c3c')
ax.spines['top'].set_linewidth(2)
ax.spines['top'].set_visible(True)
ax.spines['top'].set_linestyle('dashed')
df = json_grouped_biotype_nested_counter_to_df(consensus_data, 'Completeness')
with completeness_plot_tgt.open('w') as outf, PdfPages(outf) as pdf:
tot_df = df.groupby(by=['genome', 'category']).aggregate(np.sum).reset_index()
tot_df = sort_long_df(tot_df, ordered_genomes)
title = 'Number of comparative genes/transcripts present'
g = generic_barplot(pdf=pdf, data=tot_df, x='genome', y='count', col='category', xlabel='',
sharey=False, ylabel='Number of genes/transcripts', title=title,
col_order=['Gene', 'Transcript'], close=False, palette=choose_palette(ordered_genomes))
adjust_plot(g, len(gene_biotype_map), len(transcript_biotype_map))
multipage_close(pdf, tight_layout=False)
for biotype in biotypes:
biotype_df = biotype_filter(df, biotype)
if biotype_df is not None:
biotype_df = sort_long_df(biotype_df, ordered_genomes)
                gene_biotype_count = len({i for i, b in gene_biotype_map.items() if b == biotype})
                tx_biotype_count = len({i for i, b in transcript_biotype_map.items() if b == biotype})
title = 'Number of comparative genes/transcripts present for biotype {}'.format(biotype)
g = generic_barplot(pdf=pdf, data=biotype_df, x='genome', y='count', col='category', xlabel='',
sharey=False, ylabel='Number of genes/transcripts',
title=title, col_order=['Gene', 'Transcript'], close=False,
palette=choose_palette(ordered_genomes))
adjust_plot(g, gene_biotype_count, tx_biotype_count)
multipage_close(pdf, tight_layout=False)
def improvement_plot(consensus_data, ordered_genomes, improvement_tgt):
def do_kdeplot(x, y, ax, n_levels=None, bw='scott'):
try:
sns.kdeplot(x, y, ax=ax, cut=0, cmap='Purples_d', shade=True, shade_lowest=False, n_levels=n_levels, bw=bw,
rasterized=True)
        except Exception:
            logger.warning('Unable to do a KDE fit to AUGUSTUS improvement.')
with improvement_tgt.open('w') as outf, PdfPages(outf) as pdf, sns.axes_style("whitegrid"):
for genome in ordered_genomes:
data = | pd.DataFrame(consensus_data[genome]['Evaluation Improvement']['changes']) | pandas.DataFrame |
##### file path
# input
path_df_D = "tianchi_fresh_comp_train_user.csv"
path_df_part_1 = "df_part_1.csv"
path_df_part_2 = "df_part_2.csv"
path_df_part_3 = "df_part_3.csv"
path_df_part_1_tar = "df_part_1_tar.csv"
path_df_part_2_tar = "df_part_2_tar.csv"
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# output
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
import pandas as pd
import numpy as np
##========================================================##
##======================== Part 3 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of df_part_3
(1)
u_b1_count_in_6
u_b2_count_in_6
u_b3_count_in_6
u_b4_count_in_6
u_b_count_in_6
(2)
u_b1_count_in_3
u_b2_count_in_3
u_b3_count_in_3
u_b4_count_in_3
u_b_count_in_3
(3)
u_b1_count_in_1
u_b2_count_in_1
u_b3_count_in_1
u_b4_count_in_1
u_b_count_in_1
(4)
u_b4_rate (in_6)
u_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
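# The per-user behaviour counts below rely on a cumcount trick: within each (user_id, behavior_type)
# group, cumcount() numbers the rows 0..n-1, so keeping the last row per group and adding 1 yields
# the group size. get_dummies() then spreads that count into one column per behaviour type.
# A minimal toy illustration (hypothetical data, not part of the competition set):
#   toy = pd.DataFrame({'user_id': [1, 1, 1, 2], 'behavior_type': [1, 1, 4, 1]})
#   toy['cumcount'] = toy.groupby(['user_id', 'behavior_type']).cumcount()
#   toy.drop_duplicates(['user_id', 'behavior_type'], 'last')['cumcount'] + 1
#   # -> 2 (user 1, type 1), 1 (user 1, type 4), 1 (user 2, type 1)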
# u_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_6 = pd.get_dummies(df_part_3_u_b_count_in_6['behavior_type']).join(
df_part_3_u_b_count_in_6[['user_id', 'cumcount']])
df_part_3_u_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_6['u_b1_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_1'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b2_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_2'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b3_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_3'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b4_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_4'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6 = df_part_3_u_b_count_in_6.groupby('user_id').agg({'u_b1_count_in_6': np.sum,
'u_b2_count_in_6': np.sum,
'u_b3_count_in_6': np.sum,
'u_b4_count_in_6': np.sum})
df_part_3_u_b_count_in_6.reset_index(inplace=True)
df_part_3_u_b_count_in_6['u_b_count_in_6'] = df_part_3_u_b_count_in_6[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_3 = pd.get_dummies(df_part_3_u_b_count_in_3['behavior_type']).join(
df_part_3_u_b_count_in_3[['user_id', 'cumcount']])
df_part_3_u_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_3['u_b1_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_1'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b2_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_2'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b3_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_3'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b4_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_4'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3 = df_part_3_u_b_count_in_3.groupby('user_id').agg({'u_b1_count_in_3': np.sum,
'u_b2_count_in_3': np.sum,
'u_b3_count_in_3': np.sum,
'u_b4_count_in_3': np.sum})
df_part_3_u_b_count_in_3.reset_index(inplace=True)
df_part_3_u_b_count_in_3['u_b_count_in_3'] = df_part_3_u_b_count_in_3[['u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_1 = pd.get_dummies(df_part_3_u_b_count_in_1['behavior_type']).join(
df_part_3_u_b_count_in_1[['user_id', 'cumcount']])
df_part_3_u_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_1['u_b1_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_1'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b2_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_2'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b3_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_3'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b4_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_4'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1 = df_part_3_u_b_count_in_1.groupby('user_id').agg({'u_b1_count_in_1': np.sum,
'u_b2_count_in_1': np.sum,
'u_b3_count_in_1': np.sum,
'u_b4_count_in_1': np.sum})
df_part_3_u_b_count_in_1.reset_index(inplace=True)
df_part_3_u_b_count_in_1['u_b_count_in_1'] = df_part_3_u_b_count_in_1[['u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1']].apply(lambda x: x.sum(),
axis=1)
# merge the result of count_in_6, count_in_3, count_in_1
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count_in_6,
df_part_3_u_b_count_in_3, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_count_in_1, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']] = df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']].astype(int)
# u_b4_rate
df_part_3_u_b_count['u_b4_rate'] = df_part_3_u_b_count['u_b4_count_in_6'] / df_part_3_u_b_count['u_b_count_in_6']
# u_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['user_id', 'time'])
df_part_3_u_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['user_id'], 'first')[
['user_id', 'time']]
df_part_3_u_b4_time.columns = ['user_id', 'b4_first_time']
df_part_3_u_b_time = df_part_3.drop_duplicates(['user_id'], 'first')[['user_id', 'time']]
df_part_3_u_b_time.columns = ['user_id', 'b_first_time']
df_part_3_u_b_b4_time = pd.merge(df_part_3_u_b_time, df_part_3_u_b4_time, on=['user_id'])
df_part_3_u_b_b4_time['u_b4_diff_time'] = df_part_3_u_b_b4_time['b4_first_time'] - df_part_3_u_b_b4_time['b_first_time']
df_part_3_u_b_b4_time = df_part_3_u_b_b4_time[['user_id', 'u_b4_diff_time']]
df_part_3_u_b_b4_time['u_b4_diff_hours'] = df_part_3_u_b_b4_time['u_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
# generating feature set U
f_U_part_3 = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_b4_time,
on=['user_id'], how='left')[['user_id',
'u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1',
'u_b4_rate',
'u_b4_diff_hours']]
# write to csv file
f_U_part_3 = f_U_part_3.round({'u_b4_rate': 3})
f_U_part_3.to_csv(path_df_part_3_U, index=False)
###########################################
'''Step 1.2 feature data set I of df_part_3
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
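# Distinct-user counts per item use a similar idea: duplicate (item_id, user_id) pairs are dropped
# first, so cumcount() within item_id numbers the remaining unique users 0..n-1 and the last row's
# cumcount + 1 is the number of distinct users who touched the item in the window.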
# i_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_id', 'user_id'])
df_part_3_in_6['i_u_count_in_6'] = df_part_3_in_6.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_6']]
# i_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_3['i_u_count_in_3'] = df_part_3_in_3.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_1['i_u_count_in_1'] = df_part_3_in_1.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_1']]
# merge for generation of i_u_count
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count_in_6,
df_part_3_i_u_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count,
df_part_3_i_u_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']] = df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']].astype(int)
# i_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_6 = df_part_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_6 = pd.get_dummies(df_part_3_i_b_count_in_6['behavior_type']).join(
df_part_3_i_b_count_in_6[['item_id', 'cumcount']])
df_part_3_i_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_6['i_b1_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_1'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b2_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_2'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b3_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_3'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b4_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_4'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6[['item_id',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6']]
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6.groupby('item_id').agg({'i_b1_count_in_6': np.sum,
'i_b2_count_in_6': np.sum,
'i_b3_count_in_6': np.sum,
'i_b4_count_in_6': np.sum})
df_part_3_i_b_count_in_6.reset_index(inplace=True)
df_part_3_i_b_count_in_6['i_b_count_in_6'] = df_part_3_i_b_count_in_6['i_b1_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b2_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b3_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_3 = pd.get_dummies(df_part_3_i_b_count_in_3['behavior_type']).join(
df_part_3_i_b_count_in_3[['item_id', 'cumcount']])
df_part_3_i_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_3['i_b1_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_1'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b2_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_2'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b3_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_3'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b4_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_4'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3[['item_id',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3']]
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3.groupby('item_id').agg({'i_b1_count_in_3': np.sum,
'i_b2_count_in_3': np.sum,
'i_b3_count_in_3': np.sum,
'i_b4_count_in_3': np.sum})
df_part_3_i_b_count_in_3.reset_index(inplace=True)
df_part_3_i_b_count_in_3['i_b_count_in_3'] = df_part_3_i_b_count_in_3['i_b1_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b2_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b3_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_1 = pd.get_dummies(df_part_3_i_b_count_in_1['behavior_type']).join(
df_part_3_i_b_count_in_1[['item_id', 'cumcount']])
df_part_3_i_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_1['i_b1_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_1'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b2_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_2'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b3_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_3'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b4_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_4'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1[['item_id',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1']]
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1.groupby('item_id').agg({'i_b1_count_in_1': np.sum,
'i_b2_count_in_1': np.sum,
'i_b3_count_in_1': np.sum,
'i_b4_count_in_1': np.sum})
df_part_3_i_b_count_in_1.reset_index(inplace=True)
df_part_3_i_b_count_in_1['i_b_count_in_1'] = df_part_3_i_b_count_in_1['i_b1_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b2_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b3_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b4_count_in_1']
# merge for generation of i_b_count
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count_in_6,
df_part_3_i_b_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']] = df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']].astype(int)
# i_b4_rate
df_part_3_i_b_count['i_b4_rate'] = df_part_3_i_b_count['i_b4_count_in_6'] / df_part_3_i_b_count['i_b_count_in_6']
# i_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['item_id', 'time'])
df_part_3_i_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_id'], 'first')[
['item_id', 'time']]
df_part_3_i_b4_time.columns = ['item_id', 'b4_first_time']
df_part_3_i_b_time = df_part_3.drop_duplicates(['item_id'], 'first')[['item_id', 'time']]
df_part_3_i_b_time.columns = ['item_id', 'b_first_time']
df_part_3_i_b_b4_time = pd.merge(df_part_3_i_b_time, df_part_3_i_b4_time, on=['item_id'])
df_part_3_i_b_b4_time['i_b4_diff_time'] = df_part_3_i_b_b4_time['b4_first_time'] - df_part_3_i_b_b4_time['b_first_time']
df_part_3_i_b_b4_time['i_b4_diff_hours'] = df_part_3_i_b_b4_time['i_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_i_b_b4_time = df_part_3_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I
f_I_part_3 = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_b4_time,
on=['item_id'], how='left')
f_I_part_3 = pd.merge(f_I_part_3,
df_part_3_i_u_count,
on=['item_id'], how='left')[['item_id',
'i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1',
'i_b4_rate',
'i_b4_diff_hours']]
# write to csv file
f_I_part_3 = f_I_part_3.round({'i_b4_rate': 3})
f_I_part_3.to_csv(path_df_part_3_I, index=False)
###########################################
'''Step 1.3 feature data set C of df_part_3
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# c_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_category', 'user_id'])
df_part_3_in_6['c_u_count_in_6'] = df_part_3_in_6.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_6']]
# c_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_3['c_u_count_in_3'] = df_part_3_in_3.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_3']]
# c_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_1['c_u_count_in_1'] = df_part_3_in_1.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_1']]
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count_in_6, df_part_3_c_u_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count, df_part_3_c_u_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']] = df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']].astype(int)
# c_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_6 = df_part_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_6 = pd.get_dummies(df_part_3_c_b_count_in_6['behavior_type']).join(
df_part_3_c_b_count_in_6[['item_category', 'cumcount']])
df_part_3_c_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_6['c_b1_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_1'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b2_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_2'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b3_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_3'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b4_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_4'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6[['item_category',
'c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6']]
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6.groupby('item_category').agg({'c_b1_count_in_6': np.sum,
'c_b2_count_in_6': np.sum,
'c_b3_count_in_6': np.sum,
'c_b4_count_in_6': np.sum})
df_part_3_c_b_count_in_6.reset_index(inplace=True)
df_part_3_c_b_count_in_6['c_b_count_in_6'] = df_part_3_c_b_count_in_6['c_b1_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b2_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b3_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b4_count_in_6']
# c_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_3 = pd.get_dummies(df_part_3_c_b_count_in_3['behavior_type']).join(
df_part_3_c_b_count_in_3[['item_category', 'cumcount']])
df_part_3_c_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_3['c_b1_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_1'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b2_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_2'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b3_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_3'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b4_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_4'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3[['item_category',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3']]
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3.groupby('item_category').agg({'c_b1_count_in_3': np.sum,
'c_b2_count_in_3': np.sum,
'c_b3_count_in_3': np.sum,
'c_b4_count_in_3': np.sum})
df_part_3_c_b_count_in_3.reset_index(inplace=True)
df_part_3_c_b_count_in_3['c_b_count_in_3'] = df_part_3_c_b_count_in_3['c_b1_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b2_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b3_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b4_count_in_3']
# c_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_1 = pd.get_dummies(df_part_3_c_b_count_in_1['behavior_type']).join(
df_part_3_c_b_count_in_1[['item_category', 'cumcount']])
df_part_3_c_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_1['c_b1_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_1'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b2_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_2'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b3_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_3'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b4_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_4'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1[['item_category',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1']]
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1.groupby('item_category').agg({'c_b1_count_in_1': np.sum,
'c_b2_count_in_1': np.sum,
'c_b3_count_in_1': np.sum,
'c_b4_count_in_1': np.sum})
df_part_3_c_b_count_in_1.reset_index(inplace=True)
df_part_3_c_b_count_in_1['c_b_count_in_1'] = df_part_3_c_b_count_in_1['c_b1_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b2_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b3_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b4_count_in_1']
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count_in_6, df_part_3_c_b_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count, df_part_3_c_b_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_3_c_b_count[['c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6',
'c_b_count_in_6',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3',
'c_b_count_in_3',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1',
'c_b_count_in_1']] = df_part_3_c_b_count[['c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6',
'c_b_count_in_6',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3',
'c_b_count_in_3',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1',
'c_b_count_in_1']].astype(int)
# c_b4_rate
df_part_3_c_b_count['c_b4_rate'] = df_part_3_c_b_count['c_b4_count_in_6'] / df_part_3_c_b_count['c_b_count_in_6']
# c_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['item_category', 'time'])
df_part_3_c_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_category'], 'first')[
['item_category', 'time']]
df_part_3_c_b4_time.columns = ['item_category', 'b4_first_time']
df_part_3_c_b_time = df_part_3.drop_duplicates(['item_category'], 'first')[['item_category', 'time']]
df_part_3_c_b_time.columns = ['item_category', 'b_first_time']
df_part_3_c_b_b4_time = pd.merge(df_part_3_c_b_time, df_part_3_c_b4_time, on=['item_category'])
df_part_3_c_b_b4_time['c_b4_diff_time'] = df_part_3_c_b_b4_time['b4_first_time'] - df_part_3_c_b_b4_time['b_first_time']
df_part_3_c_b_b4_time['c_b4_diff_hours'] = df_part_3_c_b_b4_time['c_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_c_b_b4_time = df_part_3_c_b_b4_time[['item_category',
'c_b4_diff_hours']]
# generating feature set C
f_C_part_3 = pd.merge(df_part_3_c_u_count, df_part_3_c_b_count, on=['item_category'], how='left')
f_C_part_3 = pd.merge(f_C_part_3, df_part_3_c_b_b4_time, on=['item_category'], how='left')
f_C_part_3 = f_C_part_3.round({'c_b4_rate': 3})
# write to csv file
f_C_part_3.to_csv(path_df_part_3_C, index=False)
############################################
'''Step 1.4 feature data set IC of df_part_3
ic_u_rank_in_c (in_6)
ic_b_rank_in_c (in_6)
ic_b4_rank_in_c (in_6)
'''
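# The IC features rank each item inside its own category by 6-day user count, total behaviour count
# and purchase count. rank(method='min', ascending=False) gives rank 1 to the most active item and
# lets ties share the smallest rank, so a low rank always means "popular within the category".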
# get df_part_3_i_ub_count
path_df = open(path_df_part_3_I, 'r')
try:
df_part_3_I = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
df_part_3_i_ub_count = df_part_3_I[['item_id', 'i_u_count_in_6', 'i_b_count_in_6', 'i_b4_count_in_6']]
del (df_part_3_I)
# get df_part_3_uic for merge i & c
path_df = open(path_df_part_3_uic, 'r')
try:
df_part_3_uic = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
df_part_3_ic_u_b_count = pd.merge(df_part_3_uic, df_part_3_i_ub_count, on=['item_id'], how='left').fillna(0)
df_part_3_ic_u_b_count = df_part_3_ic_u_b_count.drop_duplicates(['item_id', 'item_category'])
# ic_u_rank_in_c
df_part_3_ic_u_b_count['ic_u_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_u_count_in_6'].rank(
method='min', ascending=False).astype('int')
# ic_b_rank_in_c
df_part_3_ic_u_b_count['ic_b_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_b_count_in_6'].rank(
method='min', ascending=False).astype('int')
# ic_b4_rank_in_c
df_part_3_ic_u_b_count['ic_b4_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_b4_count_in_6'].rank(
method='min', ascending=False).astype('int')
f_IC_part_3 = df_part_3_ic_u_b_count[['item_id',
'item_category',
'ic_u_rank_in_c',
'ic_b_rank_in_c',
'ic_b4_rank_in_c']]
# write to csv file
f_IC_part_3.to_csv(path_df_part_3_IC, index=False)
############################################
'''Step 1.5 feature data set UI of df_part_3
(1)
ui_b1_count_in_6
ui_b2_count_in_6
ui_b3_count_in_6
ui_b4_count_in_6
ui_b_count_in_6
ui_b1_count_in_3
ui_b2_count_in_3
ui_b3_count_in_3
ui_b4_count_in_3
ui_b_count_in_3
ui_b1_count_in_1
ui_b2_count_in_1
ui_b3_count_in_1
ui_b4_count_in_1
ui_b_count_in_1
(2)
ui_b_count_rank_in_u (in_6)
ui_b_count_rank_in_uc (in_6)
(3)
ui_b1_last_hours (in_6)
ui_b2_last_hours (in_6)
ui_b3_last_hours (in_6)
ui_b4_last_hours (in_6)
'''
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = | pd.read_csv(path_df, index_col=False, parse_dates=[0]) | pandas.read_csv |
"""ClinVar integration script"""
import fire
import fsspec
import pandas as pd
from datetime import datetime
from pathlib import Path
from prefect import task, context, Flow, Parameter, Task
from prefect.engine.results import LocalResult
from data_source.prefect.tasks import constant
from data_source import catalog
from data_source.utils import get_df_info
from data_source.core import entry_key_str
#pylint: disable=no-value-for-parameter
@task(target="{flow_name}/{task_name}", checkpoint=True, result=LocalResult(dir="~/.prefect"))
def download(url, csv_path):
of = fsspec.open(url)
of.fs.download(url, csv_path)
return csv_path
@task
def convert_to_parquet(input_path, output_path):
df = pd.read_csv(input_path, skiprows=15, sep='\t')
info = get_df_info(df)
nrow = len(df)
df.to_parquet(output_path)
return dict(info=info, nrow=nrow)
@task
def upload(entry, parquet_path, url):
entry.fs.upload(parquet_path, url)
return True
@task
def add_entry(entry, info, catalog_path):
entry.artifact.metadata=dict(info=info['info'], nrow=info['nrow'])
catalog.add_entry(entry, urlpath=catalog_path, overwrite=True)
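# A rough sketch of how these tasks could be wired together with Prefect 1.x's Flow API; the flow
# name and parameter names here are illustrative assumptions, not part of this module:
#   with Flow("clinvar_integration") as flow:
#       url = Parameter("url")
#       csv_path = download(url, Parameter("csv_path"))
#       info = convert_to_parquet(csv_path, Parameter("parquet_path"))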
def get_entry(version, created):
dt = | pd.to_datetime(created) | pandas.to_datetime |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
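# Fixture layout: nine records spread over columns 'a', 'b' and 'c' (three each, at rows 'x', 'y',
# 'z'); column 'd' holds no records. The grouped variants merge 'a' and 'b' into group 'g1' and
# 'c' and 'd' into 'g2' via `group_by` above.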
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
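# Swap in (via replace) a value set that contains duplicates and a NaN to exercise
# uniques sorting, sort/ascending, normalize and dropna in the value_counts calls below.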
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
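# a_inv holds the same records in reverse order; combining it with `a` raises below,
# presumably because the two no longer share matching column/id/idx metadata.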
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
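# Without a column, stats() aggregates the per-column stats with the mean (hence the
# 'agg_func_mean' name): e.g. Count 2.25 is the mean of [3, 3, 3, 0]; the non-numeric
# 'Min Index'/'Max Index' rows are excluded (stats_index[:-2]).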
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
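# The tests below exercise vbt.Records directly, reusing the module-level `records_arr`
# structured array, `wrapper` and the grouped/nosort fixtures defined earlier in this module.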
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
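# Keep only the first and the last record: one for column 'a' and one for column 'c',
# leaving 'b' and 'd' empty so their reductions return NaN and zero counts.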
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
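# In `ts`, -1 marks "no value"; Ranges.from_ts emits one record per consecutive run of
# valid values, with status Open when the run is still active at the last row
# (see test_from_ts and test_records_readable below).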
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
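# Drawdowns.from_ts records every peak-to-valley-to-recovery episode in `ts2`: peak,
# start, valley and end timestamps plus their values, with status Active when the price
# has not yet recovered back to the peak (see test_records_readable below).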
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
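# `close` tiles one price series across four columns; `size` defines per-column order
# sizes (column 'd' keeps NaN sizes throughout, so it generates no orders) and
# Portfolio.from_orders turns them into order records with 1% fees.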
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# trades.py ############# #
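# ExitTrades.from_orders builds one trade record per exit order (plus an open record for
# any position still held), so partial exits of the same position share a Position Id
# (see test_records_readable below).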
exit_trades = vbt.ExitTrades.from_orders(orders)
exit_trades_grouped = vbt.ExitTrades.from_orders(orders_grouped)
class TestExitTrades:
def test_mapped_fields(self):
for name in trade_dt.names:
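# 'return' is a Python keyword, so this field is exposed via the 'returns' attribute.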
if name == 'return':
np.testing.assert_array_equal(
getattr(exit_trades, 'returns').values,
exit_trades.values[name]
)
else:
np.testing.assert_array_equal(
getattr(exit_trades, name).values,
exit_trades.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
exit_trades.close,
close
)
pd.testing.assert_series_equal(
exit_trades['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
exit_trades_grouped['g1'].close,
close[['a', 'b']]
)
assert exit_trades.replace(close=None)['a'].close is None
def test_records_arr(self):
record_arrays_close(
exit_trades.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.ExitTrades.from_orders(reversed_col_orders).values,
exit_trades.values
)
def test_records_readable(self):
records_readable = exit_trades.records_readable
np.testing.assert_array_equal(
records_readable['Exit Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.10000000000000009, 1.0, 2.0, 1.0, 0.10000000000000009, 1.0,
2.0, 1.0, 0.10000000000000009, 1.0, 1.0, 1.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Entry Price'].values,
np.array([
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Fees'].values,
np.array([
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.07, 0.08
])
)
np.testing.assert_array_equal(
records_readable['Exit Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Exit Price'].values,
np.array([
3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Exit Fees'].values,
np.array([
0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.08, 0.0
])
)
np.testing.assert_array_equal(
records_readable['PnL'].values,
np.array([
1.8681818181818182, 0.2858181818181821, 0.8699999999999999, -0.16,
-1.9500000000000002, -0.29600000000000026, -1.1300000000000001,
-0.16, 1.8681818181818182, 0.2858181818181821, 0.8699999999999999,
-1.1500000000000001, -0.08
])
)
np.testing.assert_array_equal(
records_readable['Return'].values,
np.array([
1.7125000000000001, 2.62, 0.145, -0.01, -1.7875000000000003,
-2.7133333333333334, -0.18833333333333335, -0.01,
1.7125000000000001, 2.62, 0.145, -0.1642857142857143, -0.01
])
)
np.testing.assert_array_equal(
records_readable['Direction'].values,
np.array([
'Long', 'Long', 'Long', 'Long', 'Short', 'Short', 'Short',
'Short', 'Long', 'Long', 'Long', 'Short', 'Long'
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed', 'Closed', 'Closed',
'Open', 'Closed', 'Closed', 'Closed', 'Closed', 'Open'
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
def test_duration(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].duration.values,
np.array([2, 3, 1, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.duration.values,
np.array([2, 3, 1, 1, 2, 3, 1, 1, 2, 3, 1, 1, 1])
)
def test_winning_records(self):
assert isinstance(exit_trades.winning, vbt.ExitTrades)
assert exit_trades.winning.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].winning.values,
exit_trades.winning['a'].values
)
record_arrays_close(
exit_trades.winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7)
], dtype=trade_dt)
)
def test_losing_records(self):
assert isinstance(exit_trades.losing, vbt.ExitTrades)
assert exit_trades.losing.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].losing.values,
exit_trades.losing['a'].values
)
record_arrays_close(
exit_trades.losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_win_rate(self):
assert exit_trades['a'].win_rate() == 0.75
pd.testing.assert_series_equal(
exit_trades.win_rate(),
pd.Series(
np.array([0.75, 0., 0.6, np.nan]),
index=close.columns
).rename('win_rate')
)
pd.testing.assert_series_equal(
exit_trades_grouped.win_rate(),
pd.Series(
np.array([0.375, 0.6]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('win_rate')
)
def test_winning_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].winning_streak.values,
np.array([1, 2, 3, 0])
)
np.testing.assert_array_almost_equal(
exit_trades.winning_streak.values,
np.array([1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0])
)
def test_losing_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].losing_streak.values,
np.array([0, 0, 0, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.losing_streak.values,
np.array([0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 1, 2])
)
def test_profit_factor(self):
assert exit_trades['a'].profit_factor() == 18.9
pd.testing.assert_series_equal(
exit_trades.profit_factor(),
pd.Series(
np.array([18.9, 0., 2.45853659, np.nan]),
index=ts2.columns
).rename('profit_factor')
)
pd.testing.assert_series_equal(
exit_trades_grouped.profit_factor(),
pd.Series(
np.array([0.81818182, 2.45853659]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('profit_factor')
)
def test_expectancy(self):
assert exit_trades['a'].expectancy() == 0.716
pd.testing.assert_series_equal(
exit_trades.expectancy(),
pd.Series(
np.array([0.716, -0.884, 0.3588, np.nan]),
index=ts2.columns
).rename('expectancy')
)
pd.testing.assert_series_equal(
exit_trades_grouped.expectancy(),
pd.Series(
np.array([-0.084, 0.3588]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('expectancy')
)
def test_sqn(self):
assert exit_trades['a'].sqn() == 1.634155521947584
pd.testing.assert_series_equal(
exit_trades.sqn(),
pd.Series(
np.array([1.63415552, -2.13007307, 0.71660403, np.nan]),
index=ts2.columns
).rename('sqn')
)
pd.testing.assert_series_equal(
exit_trades_grouped.sqn(),
pd.Series(
np.array([-0.20404671, 0.71660403]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('sqn')
)
def test_long_records(self):
assert isinstance(exit_trades.long, vbt.ExitTrades)
assert exit_trades.long.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].long.values,
exit_trades.long['a'].values
)
record_arrays_close(
exit_trades.long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_short_records(self):
assert isinstance(exit_trades.short, vbt.ExitTrades)
assert exit_trades.short.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].short.values,
np.array([], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].short.values,
exit_trades.short['a'].values
)
record_arrays_close(
exit_trades.short.values,
np.array([
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_open_records(self):
assert isinstance(exit_trades.open, vbt.ExitTrades)
assert exit_trades.open.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].open.values,
exit_trades.open['a'].values
)
record_arrays_close(
exit_trades.open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_closed_records(self):
assert isinstance(exit_trades.closed, vbt.ExitTrades)
assert exit_trades.closed.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].closed.values,
exit_trades.closed['a'].values
)
record_arrays_close(
exit_trades.closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'First Trade Start', 'Last Trade End',
'Coverage', 'Overlap Coverage', 'Total Records', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
pd.testing.assert_series_equal(
exit_trades.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 1.3333333333333333, 168.38888888888889,
-91.08730158730158, 149.25, -86.3670634920635, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), np.inf, 0.11705555555555548, 0.18931590012681135
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(settings=dict(incl_open=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 2.3333333333333335, 174.33333333333334,
-96.25396825396825, 149.25, -42.39781746031746, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 06:00:00'), 7.11951219512195, 0.06359999999999993, 0.07356215977397455
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 4, 4, 0, 3, 1, -0.16, 100.0, 3, 0,
262.0, 14.499999999999998, 149.25, np.nan, pd.Timedelta('2 days 00:00:00'),
pd.NaT, np.inf, 1.008, 2.181955050824476
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), | pd.Timestamp('2020-01-01 00:00:00') | pandas.Timestamp |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created on 5/23/19
#
# Created for py_bacy
#
# @author: <NAME>, <EMAIL>
#
# Copyright (C) {2019} {<NAME>}
#
# System modules
import logging
import os
import glob
import time
import warnings
import abc
from typing import Any
import gc
# External modules
import numpy as np
import pandas as pd
import xarray as xr
from pytassim.assimilation.filter import DistributedLETKFUncorr
from pytassim.localization.gaspari_cohn import GaspariCohn
# Internal modules
from .logger_mixin import LoggerMixin
from .intf_pytassim import obs_op, utils
from .model import ModelModule
from .utilities import check_if_folder_exist_create
logger = logging.getLogger(__name__)
class PyTassimModule(ModelModule, LoggerMixin, abc.ABC):
def __init__(self, name, parent=None, config=None):
super().__init__(name, parent, config)
self.assimilation = None
@property
@abc.abstractmethod
def module(self):
pass
def init_assimilation(self, start_time, end_time, parent_model,
cycle_config):
if 'smoother' not in self.config:
self.config['smoother'] = True
localization = GaspariCohn(
np.array(self.config['loc_radius']),
dist_func=self.module.distance_func
)
letkf = DistributedLETKFUncorr(
client=cycle_config['CLUSTER']['client'],
chunksize=self.config['chunksize'],
localization=localization, inf_factor=self.config['inf_factor'],
smoother=self.config['smoother']
)
return letkf
def run(self, start_time, end_time, parent_model, cycle_config):
self.assimilation = self.init_assimilation(
start_time, end_time, parent_model, cycle_config
)
self.create_symbolic(start_time, end_time, parent_model,
cycle_config)
self.assimilate_data(start_time, end_time, parent_model,
cycle_config)
self.clean_up(start_time, end_time, parent_model, cycle_config)
def clean_up(self, start_time, end_time, parent_model, cycle_config):
del self.assimilation
cycle_config['CLUSTER']['client'].restart()
cycle_config['CLUSTER']['cluster'].scale(0)
gc.collect()
def disturb_obs(self, ds_obs: xr.Dataset) -> xr.Dataset:
if not self.config['obs']['stochastic']:
logger.info('No stochastic disturbance of observations')
return ds_obs
if ds_obs.obs.correlated:
raise ValueError('Observations can be only disturbed for '
'uncorrelated observations!')
ds_obs = ds_obs.copy()
obs_stddev = np.sqrt(ds_obs['covariance'])
drawn_noise = np.random.normal(
scale=obs_stddev, size=ds_obs['observations'].shape
)
ds_obs['observations'] = ds_obs['observations'] + drawn_noise
return ds_obs
def localize_obs(
self,
ds_obs: xr.Dataset,
analysis_time: Any
) -> xr.Dataset:
if self.config['obs']['path_loc_mat'] is None:
logger.info('No temporal localization of observations')
return ds_obs
ds_obs = ds_obs.copy()
loc_mat = xr.open_dataset(self.config['obs']['path_loc_mat'])
loc_mat = loc_mat['localization']
sel_loc_mat = loc_mat.sel(analysis_time=analysis_time)
sel_loc_mat['timedelta'] = sel_loc_mat['timedelta'] + pd.to_datetime(analysis_time)
sel_loc_mat = sel_loc_mat.rename({'timedelta': 'time'})
time_intersection = sel_loc_mat.indexes['time'].intersection(
ds_obs.indexes['time']
)
sel_loc_mat = sel_loc_mat.sel(time=time_intersection)
ds_obs = ds_obs.sel(time=time_intersection)
non_zero_weight = sel_loc_mat != 0
ds_obs = ds_obs.isel(time=non_zero_weight)
sel_loc_mat = sel_loc_mat.isel(time=non_zero_weight)
        logger.debug('Temporal weights: %s', sel_loc_mat)
cov_matrix = ds_obs['covariance'].expand_dims(time=ds_obs.time)
cov_matrix = cov_matrix / sel_loc_mat
ds_obs['covariance'] = cov_matrix
logger.info('Observations are temporal localized')
return ds_obs
def assimilate_data(self, start_time, analysis_time, parent_model,
cycle_config):
cycle_config['CLUSTER']['cluster'].scale(
cycle_config['CLUSTER']['n_workers']
)
run_dir = self.get_run_dir(start_time, cycle_config)
ensemble_members = cycle_config['ENSEMBLE']['size']
util_dir = self.config['obs']['utils_path']
file_path_obs = self.config['obs']['path']
fg_files = self.config['obs']['fg_files']
assim_vars = self.config['assim_vars']
bg_files = self.config['bg_files']
obs_timedelta = [
| pd.to_timedelta(self.config['obs']['td_start']) | pandas.to_timedelta |
#+ Common data science utilities
import matplotlib as mpl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.style as style
import seaborn as sns
from sklearn.preprocessing import PowerTransformer
import category_encoders as ce
from sklearn.model_selection import StratifiedKFold, KFold
from joblib import Parallel, delayed
import multiprocessing
from scipy import stats
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_categorical_dtype
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
from openpyxl import load_workbook, Workbook
import xlrd
# ---------------------------------
# Visualization tools
# ---------------------------------
def set_format():
pd.set_eng_float_format(accuracy=2, use_eng_prefix=False)
def set_matplotlib():
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
mpl.rcParams['figure.dpi'] = 100
# style.available
# style.use('ggplot')
def whiteboard(row=1, col=1, dpi=100):
fig, ax = plt.subplots(row, col, figsize=(6.4, 4.8), dpi=dpi)
return (fig, ax)
def count_plot(df, var, type='bar'):
fig, ax = whiteboard()
counts = df[var].value_counts()
if type == 'bar':
counts.head(15).sort_values().plot.barh(ax=ax)
else:
counts.sort_index().plot.line(ax=ax)
ax.set_xlabel('count')
ax.set_ylabel('value')
ax.set_title(var)
def kde_by_target(df_raw, var, target, cut=0.99):
if cut is not None:
upper = df_raw[var].quantile(cut)
df = df_raw[df_raw[var] <= upper]
else:
df = df_raw
    # Most variables have long tails; truncate them so the plot is easier to read
fig, ax = whiteboard()
for y in df[target].unique():
sub = df[df[target] == y]
sns.distplot(sub[var], hist=False, ax=ax, label=str(y),
kde_kws={"lw": 0.7})
ax.legend()
def series_plot(series, xdate=True, xlabel='date', ylabel='', title=''):
fig, ax = whiteboard()
series.plot(ax=ax, linewidth=1.0)
if xdate:
fig.autofmt_xdate()
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title)
def sns_sparse_xticks(plot_, freq):
for ind, label in enumerate(plot_.get_xticklabels()):
        if ind % freq == 0: # keep every freq-th label
label.set_visible(True)
else:
label.set_visible(False)
# ---------------------------------
# Descriptive statistics
# ---------------------------------
def count_na(df):
missing_dict = df.isna().sum()
return missing_dict[missing_dict > 0]
def groupby_info(df, col, target):
return df.groupby(target)[col].agg(mean=np.mean, median=np.median, mode=lambda x: stats.mode(x)[0][0], max=np.max,
min=np.min, std=np.std)
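# Usage sketch (column names 'score' and 'label' are illustrative, not from the original code):
#   summary = groupby_info(df, col='score', target='label')
#   # -> one row per 'label' value with mean/median/mode/max/min/std of 'score'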
# ---------------------------------
# Feature cleaning
# ---------------------------------
# Missing values
def replace(x, from_, to_):
tmp = x.copy()
tmp[tmp == from_] = to_
return tmp
def group_fillna(df, col, target, method='mean'):
if method == 'mean':
result = df.groupby([target], sort=False)[col].apply(lambda x: x.fillna(x.mean()))
elif method == 'median':
result = df.groupby([target], sort=False)[col].apply(lambda x: x.fillna(x.median()))
return result
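# Usage sketch (assumes df has a numeric column 'age' and a grouping column 'label'; names are illustrative):
#   df['age'] = group_fillna(df, col='age', target='label', method='median')
#   # NaNs in 'age' are filled with the median of 'age' within each 'label' group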
# Outliers
def windsorize(series, upper, lower):
return series.clip(lower=lower, upper=upper)
def cap(x, extreme=5):
    # width = half of the interquartile range (IQR / 2)
width = (x.quantile(0.75) - x.quantile(0.25)) / 2
median = x.median()
return x.clip(median - extreme * width, median + extreme * width)
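# Usage sketch: winsorize a numeric Series at median +/- extreme * (IQR / 2); 'income' is an illustrative column name:
#   capped = cap(df['income'], extreme=5)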
# ---------------------------------
# Single-feature transforms
# ---------------------------------
def box_cox(x_train, x_test=None):
bc = PowerTransformer(method='box-cox')
bc = bc.fit(x_train)
x_train_bc = bc.transform(x_train)
if x_test is not None:
x_test_bc = bc.transform(x_test)
else:
x_test_bc = None
return (x_train_bc, x_test_bc)
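# Usage sketch (sklearn's PowerTransformer with method='box-cox' needs strictly positive,
# 2D input such as a one-column DataFrame; the column name is illustrative):
#   x_train_bc, x_test_bc = box_cox(train[['price']], test[['price']])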
def standardize(x_train, x_test=None, cut=None):
"""
    cut: clip the standardized values at +/-cut standard deviations
"""
avg, var = x_train.mean(), x_train.std()
x_train_s = (x_train - avg) / var
if cut is not None:
x_train_s = windsorize(x_train_s, cut, -cut)
if x_test is not None:
x_test_s = (x_test - avg) / var
if cut is not None:
x_test_s = windsorize(x_test_s, cut, -cut)
else:
x_test_s = None
return (x_train_s, x_test_s)
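# Usage sketch: z-score using the training mean/std, optionally clipping the result at +/-cut:
#   x_train_s, x_test_s = standardize(train['value'], test['value'], cut=3)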
def bin(x, n_scatter=10):
"""
    Quantile binning of a continuous variable
"""
result = | pd.qcut(x, n_scatter) | pandas.qcut |
from django.http import JsonResponse
import pandas as pd
import numpy as np
import json
from django.views.decorators.csrf import csrf_protect
import os # os.getcwd()
df_comPreRequisitos = pd.read_csv('data_science/disciplinas_prerequisitosnome.csv')
df_turmas2015 = pd.read_csv('data_science/turmas_new.csv')
def dataFrameToJson(dataFrame):
dataFrame = dataFrame.to_json(orient='records')
dataFrame = json.loads(dataFrame)
return dataFrame
# Returns the courses and their respective prerequisites for a given period
@csrf_protect
def disciplinasPeriodo(request):
periodo = int(request.GET.get('periodo'))
df_retorno = df_comPreRequisitos[df_comPreRequisitos['periodo']==periodo]
if(periodo == 0):
df_retorno = df_comPreRequisitos['nome']
return JsonResponse({'results':dataFrameToJson(df_retorno)})
# Returns the standard deviation of the course grades
@csrf_protect
def desvioPadrao(request):
disciplina = request.GET.get('disciplina')
data = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
media = data.mean()
soma = 0
soma += ((data.map(int) - media) ** 2).sum()
variancia = soma / (len(data) - 1)
desvio = variancia ** 0.5
return JsonResponse({'Desvio padrão': desvio})
# Returns the mean grade of the course
@csrf_protect
def media(request):
disciplina = request.GET.get('disciplina')
data = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
media = data.mean()
return JsonResponse({'Media': media})
# Returns the grades of the course
@csrf_protect
def notas(request):
disciplina = request.GET.get('disciplina')
colunas = ['discente', 'id_turma', 'media_final', 'nome']
df = df_turmas2015[colunas].drop_duplicates()
if(disciplina==""):
notas = df[['nome', 'media_final']]
else:
notas = df[df['nome'] == disciplina].media_final
return JsonResponse({'Notas': dataFrameToJson(notas)})
# Returns the grades of the course, filtered by a minimum grade
@csrf_protect
def notasFiltro(request):
disciplina = request.GET.get('disciplina')
filtro = int(request.GET.get('filtro'))
notas = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
notas = notas[notas>= filtro]
return JsonResponse({'Notas': dataFrameToJson(notas)})
# Returns the correlation (float) between the two given courses
def simpleCorrelacao(discA,discB):
dataFrame = df_turmas2015
dataFrameA = dataFrame[dataFrame['nome'] == discA]
dataFrameB = dataFrame[dataFrame['nome'] == discB]
    # Students approved in discA
dataFrameA = dataFrameA[dataFrameA['descricao'].str.contains('APROVADO')]
series_aprovados = dataFrameA.discente.unique()
df_finalB = dataFrameB[dataFrameB.discente.isin(series_aprovados)]
df_finalB = df_finalB.groupby('discente').periodoano.min().reset_index()
df_finalB = pd.merge(df_finalB, dataFrameB, on=["discente","periodoano"])
colunas = ['discente', 'media_final', 'nome']
dataFrameA = dataFrameA[colunas]
df_finalB = df_finalB[colunas]
conc = pd.concat([dataFrameA, df_finalB])
df = pd.crosstab(conc.discente, conc.nome, conc.media_final, aggfunc=np.mean)
df = df.dropna()
df_correlacao = df.corr()
# return JsonResponse({'results': df_correlacao[discA][discB] })
return df_correlacao[discA][discB]
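# How the correlation above is obtained (sketch; course names are illustrative):
# pd.crosstab builds a student x course matrix of mean grades, students missing either
# course are dropped, and the Pearson correlation of the two columns is returned, e.g.:
#   r = simpleCorrelacao('CALCULO I', 'CALCULO II')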
# Computes the pairwise correlation for a list of courses
@csrf_protect
def correlacao(request):
args = request.GET.get('lista')
lista_disciplinas = args.split(',')
    # matrix of zeros
w, h = len(lista_disciplinas), len(lista_disciplinas)
content = [[0] * w for i in range(h)]
correlacoes = np.array(content, dtype='f')
    # compute the correlations without repetition (upper triangle only)
for i in range(0, len(lista_disciplinas)):
for j in range(0, len(lista_disciplinas)):
if i == j:
correlacoes[i][j] = 1
if i < j:
correlacoes[i][j] = simpleCorrelacao(lista_disciplinas[i], lista_disciplinas[j])
df_retorno = pd.DataFrame(correlacoes, columns=lista_disciplinas)
# df_retorno = df_retorno.set_axis(lista_disciplinas, axis=0, inplace=False)
return JsonResponse({'results':dataFrameToJson(df_retorno)})
# Returns the data for the parallel-coordinates chart
@csrf_protect
def coordenadasParalelas(request):
args = request.GET.get('lista')
lista_disciplinas = args.split(',')
dataFrame = df_turmas2015
    # Count failures where media_final is not null
df_contagemRep = dataFrame[dataFrame['descricao'].str.contains('REPROVADO')]
df_contagemRep = df_contagemRep[df_contagemRep.media_final.notnull()]
colunas_1 = ['descricao', 'discente', 'media_final', 'id_turma', 'nome']
df_contagemRep = df_contagemRep[colunas_1].drop_duplicates()
df_contagemRep = df_contagemRep[df_contagemRep['nome'].isin(lista_disciplinas)]
df_contagemRep = df_contagemRep.groupby(['discente']).descricao.count().reset_index()
    # Students who were approved and never failed
series_Rep = df_contagemRep['discente']
df_NRep = dataFrame[dataFrame['descricao'].str.contains('APROVADO')]
    # drop the students who have failures
df_NRep = df_NRep[~df_NRep['discente'].isin(series_Rep)]
df_NRep = df_NRep[df_NRep.media_final.notnull()]
colunas_2 = ['descricao', 'discente', 'media_final', 'id_turma', 'nome']
df_NRep = df_NRep[colunas_2].drop_duplicates()
df_NRep = df_NRep[df_NRep['nome'].isin(lista_disciplinas)]
    # combine APPROVED and FAILED records
aprovados = pd.DataFrame()
aprovados['discente'] = df_NRep['discente']
aprovados['descricao'] = df_NRep['descricao']
aprovados = aprovados.replace('APROVADO', 0)
aprovados = aprovados.replace('APROVADO POR NOTA', 0)
df_contagem = | pd.concat([df_contagemRep, aprovados]) | pandas.concat |
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits default to False; RefPerc defaults to True
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- General parameters
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
    shiftSize: the difference between 2 indices for dValue and dt
    windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
        windowSize must be an even number
        for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
        dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
            mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:] # integer division so that .iloc gets an int position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
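# Usage sketch (illustrative; assumes df has a DatetimeIndex and a numeric column 'value'):
#   dfDeriv = getDerivative(df, 'value', shiftSize=1, windowSize=60)
#   # adds 'dt', 'dValue', 'dValueDt' and 'dValueDtFiltered' (rolling mean, since savgol_polyorder is None)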
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
    returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- General functions
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
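# Quick illustration of the recipe above:
#   list(pairwise([1, 2, 3]))  ->  [(1, 2), (2, 3)]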
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
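# Usage sketch (times are illustrative): three consecutive 12-minute sections after a start time:
#   xlims = genTimespans(pd.Timestamp('2021-03-19 01:00:00'), 3, timeSpan=pd.Timedelta('12 Minutes'))
#   # -> [(01:00, 01:12), (01:12, 01:24), (01:24, 01:36)] with the default overlap/praefix/postfix of 0 seconds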
def gen2Timespans(
        timeStart # start of a "process"
        ,timeEnd # end of a "process"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
        ,roundStr=None # e.g. '5min': timeStart.round(roundStr) and likewise timeEnd
):
"""
    creates 2 time ranges of equal length
    one around timeStart
    one around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
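# Usage sketch: two equally long windows, one around timeStart and one around timeEnd:
#   xlims = gen2Timespans(tStart, tEnd, timeSpan=pd.Timedelta('12 Minutes'))
#   # -> [(tStart, tStart+12min), (tEnd-12min, tEnd)] with the default praefix/postfix of 0 seconds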
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
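# Usage sketch: total duration covered by the time pairs, expressed in minutes as an int:
#   totalMin = fTotalTimeFromPairs(tPairs, denominator=pd.Timedelta('1 minute'), roundToInt=True)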
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
            # iterate pairwise over all rows
            for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
                row1Value=fct(row1)
                row2Value=fct(row2)
                # if row 1 not x and row 2 x: tEin=t2 ("switches on")
                if not row1Value and row2Value:
                    tEin=i2
                # if row 1 x and row 2 not x: tAus=t2 ("switches off")
                elif row1Value and not row2Value:
                    if tEin != None:
                        # store the pair
                        tPair=(tEin,i1)
                        tPairs.append(tPair)
                    else:
                        pass # otherwise: the condition is now off and was never on
                # the condition can only switch on in the first case
                # if row 1 x and row 2 x
                elif row1Value and row2Value:
                    if tEin != None:
                        pass
                    else:
                        # in the first pair of values the range is on
                        tEin=i1
            # last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
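# Usage sketch (assumes a DataFrame with a DatetimeIndex and a column 'col'):
#   tPairs = findAllTimeIntervalls(df, fct=lambda row: row['col'] == 46)
#   # -> list of (start, end) Timestamps of the intervals where the condition holds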
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
    # if fct:
    # find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True solitaires are not lost but are returned as a pair (t,t)
    # True solitaires are ONLY included if s contains just 1 value and that value is True; the single returned pair then holds the solitaire timestamp for both times
    # tdAllowed can be specified
    # afterwards the ranges are merged into time ranges that are no more than tdAllowed apart; these merged time ranges are then returned
    # if fct None:
    # tdAllowed must be specified
    # split into time ranges that are no more than the threshold tdAllowed apart; these time ranges are returned
    # in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for that
    # because no time range that is contained in s shall be lost
    # if s contains only 1 value, 1 time pair with the same timestamp for both times is returned, provided the value is not Null
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
    >>> # 1 value
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
    >>> # 1 value
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
    >>> # 1 value
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
                # 1 pair with identical times if the single element is True
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
                # 1 pair with identical times if the single element is not None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
                # iterate pairwise over all times
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
                    # if s1 not x and s2 x: tEin=t2 ("switches on")
                    if not s1Value and s2Value:
                        tEin=i2
                        if idx > 0: # Info
                            pass
                        else:
                            # "switches on" already at the first pair
                            pass
                    # if s1 x and s2 not x: tAus=t2 ("switches off")
                    elif s1Value and not s2Value:
                        if tEin != None:
                            if tEin<i1:
                                # store the pair
                                tPair=(tEin,i1)
                                tPairs.append(tPair)
                            else:
                                # singular event
                                # pair with identical times
                                tPair=(tEin,i1)
                                tPairs.append(tPair)
                                pass
                        else: # switches off without ever being on
                            if idx > 0: # Info
                                pass
                            else:
                                # in the first pair
                                pass
                    # if s1 x and s2 x
                    elif s1Value and s2Value:
                        if tEin != None:
                            pass
                        else:
                            # in the first pair of values the range is on
                            tEin=i1
                # handle the last pair
                # still on at the end of the Series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
                # handle tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
                # iterate pairwise over all times
                # start a new pair
                anzInPair=1 # number of times in the current time span
                for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
                    td=i2-i1
                    if td > tdAllowed: # gap between 2 times > threshold: the time span is closed
                        if tEin==None:
                            # the first pair is already more than the threshold apart
                            # closing the time span is ignored, because otherwise the span would contain only 1 value
                            # the current time span starts at the 1st value and runs across the threshold
                            tEin=i1
                            anzInPair=2
                        else:
                            if anzInPair>=2:
                                # close the time span
                                tPair=(tEin,i1)
                                tPairs.append(tPair)
                                # start a new time span
                                tEin=i2
                                anzInPair=1
                            else:
                                # closing the time span is ignored, because otherwise the span would contain only 1 value
                                anzInPair=2
                    else: # gap allowed, continue ...
                        if tEin==None:
                            tEin=i1
                        anzInPair=anzInPair+1
                # handle the last time pair
                if anzInPair>=2:
                    tPair=(tEin,i2)
                    tPairs.append(tPair)
                else:
                    # one last value would be left over, so extend the last time span ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
                tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous one
                tPairs.remove(tp2) # remove the following pair
                tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
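# Behaviour sketch: pairs whose gap is <= tdAllowed are merged recursively, e.g.
#   [(t1, t2), (t3, t4)] with t3 - t2 <= tdAllowed  ->  [(t1, t4)]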
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameters and functions for LDS reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# Colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
# Colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
          ,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # same color from here on
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
          ,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # same color from here on
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
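# Example (sketch; the ID below is purely illustrative, not taken from real data):
# m=pSIDEvents.search('Objects.3S_FBG_ESCHIEBER.3S_XYZ_01.Out.AUF')
# m.group('colRegExSchieberID') # -> 'XYZ_01' (which valve)
# m.group('colRegExMiddle')     # -> '3S_FBG_ESCHIEBER' (== valRegExMiddleCmds, i.e. a command)
# m.group('colRegExEventID')    # -> 'Out.AUF' (marker index via eventCCmds resp. eventCStats)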
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
    if m is None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# Ableitung eines DIVPipelineNamens von PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# Ableitung eines DIVPipelineNamens von SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
    if m is None:
return SEGName
return m.group(1)+'_'+m.group(3)
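# Example (sketch): for the SEGName used elsewhere in this module the pattern yields group(1)='6', group(3)='41':
# fDIVNameFromSEGName('6_AAD_41_OHV1') # -> '6_41'
# fDIVNameFromPV works analogously on a PV, using the Lx.pID groups C2 and C4.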
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
funktioniert im Prinzip fuer SEG- und Druck-Ergs: jede Erg-PV eines Vektors liefert die Basis gueltig fuer alle Erg-PVs des Vektors
d.h. die Erg-PVs eines Vektors unterscheiden sich nur hinten
siehe auch fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
    if m is None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
            if m.group('C7') is not None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
siehe auch fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df mit ODI Parametrierungsdaten
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # nur ergIDs, fuer die 2ndPatternPat zutrifft liefern
):
"""
returns string
mit strSep getrennten IDs aus dfODI, welche baseID enthalten (und bei pattern WAHR patternPat matchen)
baseID (und group(0) von patternPat bei pattern WAHR) sind in den IDs entfernt
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
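# Usage sketch (assumption: dfODI is indexed by the full ODI IDs, as returned by Lx.getDfFromODI; ODIFile is an illustrative path):
# dfODI=Lx.getDfFromODI(ODIFile) # ODIFile: path to AppLDS_ODI.csv
# ergIDs    =fGetErgIDsFromBaseID(baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.',dfODI=dfODI,pattern=False) # non-IMDI result IDs
# ergIDsIMDI=fGetErgIDsFromBaseID(baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.',dfODI=dfODI,pattern=True)  # IMDI. result IDs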
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
alle Segmente mit Pfaddaten (Kantenzuege) mit Kanten- und Knotendaten sowie Parametrierungsdaten
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
        if am is None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
            dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin']))][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
            df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin']))][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
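# Usage sketch (paths are the parameter defaults above and purely illustrative; dfSNND is a hypothetical name):
# dfSNND=dfSegsNodesNDataDpkt(
#      VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
#     ,Model=r"MDBDOC\FBG.mdb"
#     ,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
#     )
# dfSNND is used below as input for getAlarmStatistikData and buildAlarmDataframes.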
def fResValidSeriesSTAT_S(x): # STAT_S
    # gueltig (Zustaendig), wenn nicht null und >= 0
    return (not pd.isnull(x)) and (x >= 0)
def fResValidSeriesSTAT_S601(x): # STAT_S
    # gueltig (Stoerung), wenn nicht null und == 601
    return (not pd.isnull(x)) and (x == 601)
def fResValidSeriesAL_S(x,value=20): # AL_S
    # gueltig (Alarm), wenn nicht null und == value
    return (not pd.isnull(x)) and (x == value)
def fResValidSeriesAL_S10(x):
    return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
    return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
    return fResValidSeriesAL_S(x,value=3)
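# Example (sketch): the predicates above are applied to single values of a result channel,
# e.g. via findAllTimeIntervallsSeries in fGetResTimes below:
# fResValidSeriesSTAT_S(601)          # -> True  (any STAT_S value >= 0)
# fResValidSeriesSTAT_S601(601)       # -> True  (only STAT_S == 601)
# fResValidSeriesAL_S(20)             # -> True  (AL_S == value, default 20)
# fResValidSeriesAL_S10(float('nan')) # -> False (null values are never valid)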
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # Schiebersymobole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
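# Usage sketch (assumptions: lx is an Lx.AppLog instance; the ResIDBase is the docstring example):
# lx=Lx.AppLog(h5File='a.h5')
# dfResVec=getLDSResVecDf(
#      ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
#     ,LDSResBaseType='SEG'
#     ,lx=lx
#     )
# dfResVec columns are then the channel suffixes ('AL_S','STAT_S','SB_S',...).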
def fGetResTimes(
ResIDBases=[] # Liste der Wortstaemme der Ergebnisvektoren
,df=pd.DataFrame() # TCsLDSRes...
,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # Liste der Ergebnisvektoren Postfixe
    ,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # Liste der Ergebnisvektoren Funktionen
,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # Liste der key-Namen der Ergebnisse
,tdAllowed=pd.Timedelta('1 second') # erlaubte Zeitspanne zwischen geht und kommt (die beiden an diese Zeitspanne angrenzenden Zeitbereiche werden als 1 Zeit gewertet)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
Value: Liste mit Zeitpaaren (oder leere Liste)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
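# Sketch of the returned structure (keys per ResChannelResultNames; times are placeholders):
# resTimesDct={
#     ResIDBase: {
#          'Zustaendig':[(tA1,tE1),(tA2,tE2),...]
#         ,'Alarm':[(tA,tE),...]
#         ,'Stoerung':[]
#     }
# }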
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
        (period,freq)=timeShiftPair
        timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
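# Usage sketch (h5File and timeShiftPair are illustrative; dfSNND as obtained above):
# TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(
#      h5File='a.h5'
#     ,dfSegsNodesNDataDpkt=dfSNND
#     ,timeShiftPair=(1,'H') # e.g. at Replay
#     )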
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
# Alarm geht ... Alarm kommt (wieder): wenn Zeitspanne ... <= tdAllowed, dann wird dies _gewertet als dieselbe Alarmzeitspanne
# d.h. es handelt sich _gewertet inhaltlich um denselben Alarm
# None zählt die Alarme strikt getrennt
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.<KEY>.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
        key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
                for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
        sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammengefasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
        voneinander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
        # voneinander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
    Returns dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
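# Pipeline sketch (dfSNND, TCsLDSRes1/2, dfCVDataOnly as obtained above; purely illustrative):
# SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSNND)
# dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(
#      TCsLDSRes1=TCsLDSRes1
#     ,TCsLDSRes2=TCsLDSRes2
#     ,dfSegsNodesNDataDpkt=dfSNND
#     ,dfCVDataOnly=dfCVDataOnly
#     ,SEGResDct=SEGResDct
#     ,DruckResDct=DruckResDct
#     )
# t=plotDfAlarmStatistik(dfAlarmStatistik) # renders the statistics as a plt.table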
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
# diese Zeiten um (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
if LDSResBaseType == 'SEG': # 'Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.']
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
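# Usage sketch (IDs are the ones from the comments above; the exact short form depends on the Lx.pID groups):
# fOrteStripped('SEG',['Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.'])
# fOrteStripped('Druck',['Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In'])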
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
        Voralarm: ermittelter Voralarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
        Type: Typ des Kontrollraums; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
# korrespondiernede Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
            else [tpl for tpl in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tpl[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except Exception:
pass # no prior AL_S value available (e.g. tA lies before the start of the result vector)
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Vorlalarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # Voralarm 0: Bilanz war ggf. in Stoerung
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Voralarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} {:d} tA {!s:s}: unbekannter Vorlalarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
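# note: Voralarm is the AL_S value immediately before tA taken from the corresponding
# result vector; -1 means no prior value could be determined. Besides -1 and 0 the
# expected values are 3, 4 and 10; anything else was logged as unknown above.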
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except Exception:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# re-assign Nr: pd.merge above reset the index to a RangeIndex, so index+1 now yields the sequential number in the sorted order
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fCVDName(Name
):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
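# usage sketch (editorial; values illustrative):
#   fCVDName('') returns 'ZHKName vmtl. nicht in Log'
#   names longer than 2*lNameMaxH (=40) characters are shortened to
#   '<first 18 chars>....<last 18 chars>' and '°' is replaced by '|'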
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Renders dfAlarmEreignisse as a matplotlib table on the current figure and returns the plt.table object.
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
ZHKNStr in Abhängigkeit der aktuellen Zeile und dfOrig
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
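# editorial note: the cell colouring further below indexes dfAlarmEreignisse by table
# row number; if sortBy changes the row order relative to dfAlarmEreignisse, the colours
# may no longer match the displayed rows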
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T' # Runden (3 Minuten)
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
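# note: timeShiftPair is expected as (period, freq), e.g. (1, 'hour'); it becomes a
# pd.Timedelta that shifts the plotted x-limits below and is also passed on to getLDSResVecDf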
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
txtNr=" Nr.: {!s:s}".format(AlNr)
txt=txt+txtNr
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
#logger.info("{:s}".format(titleStr))
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
#(fileName,ext)= os.path.splitext(SEGErgsFile)
fileNameAlarm="{:s} {:s}.png".format(fileName.replace('.png','')
,txtNr.replace('Nr.: ','Nr ').replace(',','').replace('[','').replace(']',''))
plt.savefig(fileNameAlarm)
plt.show()
###plt.clf()
plt.close()
###plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
def plotDfAlarmStatistikReportsDruckErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,DruckResDct={}
,timeStart=None,timeEnd=None
,DruckErgsFile='DruckErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H'
,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T' # Runden (3 Minuten)
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with RuheZeitenAlAnz>0
1 Base Plot for a Druck with an Alarm and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
logger.debug("{0:s}firstTime (ohne TimeShift): {1:s} lastTime (ohne TimeShift): {2:s}".format(logStr,str(firstTime),str(lastTime)))
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr) # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
logger.debug("{0:s}timeStart abgerundet (ohne TimeShift): {1:s} timeEnd aufgerundet (ohne TimeShift): {2:s} TimeShift: {3:s}".format(logStr
,str(timeStart)
,str(timeEnd)
,str(timeDelta)))
xlimsDct={}
pdf=PdfPages(DruckErgsFile)
(fileNameBase,ext)= os.path.splitext(DruckErgsFile)
# über alle Segmente der Alarmstatistik (die DruckIDs sollen in der Reihenfolge der Alarmstatistik abgearbeitet werden)
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if row['RuheZeitenAlAnz']==0:
logger.info("LfdNr {:2d} - {:s}: {:s}: RuheZeitenAlAnz: 0".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']))
continue # keine SEGs ohne Alarme drucken
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
idxSEGPlotted=idxSEGPlotted+1
# DruckIDs eines Segmentes
DruckIDs=sorted([ID for ID in dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DruckResIDBase'].unique() if not pd.isnull(ID)])
for idxDruckID,DruckResIDBase in enumerate(DruckIDs):
dct=DruckResDct[DruckResIDBase]
if len(dct['Alarm'])==0:
# nur DruckIDs mit Alarmen plotten
continue
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
# Erg lesen
ResIDBase=DruckResIDBase
dfDruckReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='Druck',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
logger.debug("{:s}ResIDBase: {:s} dfDruckReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfDruckReprVec.columns.to_list()))
logger.debug("{:s}ID: {:s}: timeStart (mit TimeShift): {:s} timeEnd (mit TimeShift): {:s}".format(logStr
,DruckResIDBase
,str(dfDruckReprVec.index[0])
,str(dfDruckReprVec.index[-1])
))
ID='AL_S'
if ID not in dfDruckReprVec.keys():
continue
xlimsDct[ResIDBase]=[]
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten))
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,fOrteStripped('Druck',[DruckResIDBase])[0]
)
plt.savefig(fileName)
plt.show()
pdf.savefig(fig)
plt.close()
# Plot Alarme ###########################################################
dct=DruckResDct[DruckResIDBase]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['RuheZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d} Nr. {:4d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten)
,AlNr)
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileNameAlarm="{:s} Nr {:d}.png".format(fileName.replace('.png',''),AlNr)
plt.savefig(fileNameAlarm)
plt.show()
plt.close()
#plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
def plotTimespans(
xlims # list of sections
,orientation='landscape' # oben HYD unten LDS; 'portrait': # links HYD rechts LDS
,pad=3.5 # tight_layout() can take keyword arguments of pad, w_pad and h_pad. These control the extra padding around the figure border and between subplots. The pads are specified in fraction of fontsize.
,w_pad=0.5
,h_pad=0.5
# 'portrait' # links HYD rechts LDS
,rectSpalteLinks=[0, 0, 0.5, 1]
,rectSpalteRechts=[0.325, 0, 1, 1]
# 'landscape': # oben HYD unten LDS
,rectZeileOben=[0, .5, 1, 1]
,rectZeileUnten=[0, 0, 1, .5]
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,figTitle='' #!
,figSave=False #!
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,sectionTitlesLDS=None # list of section titles to be used
,sectionTextsLDS=None # list of section texts to be used
,vLinesX=[] # plotted in each HYD section if X-time fits
,hLinesY=[] # plotted in each HYD section
,vAreasX=[] # for each HYD section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXLDS=None # plotted in each LDS section if X-time fits
,vAreasXLDS=None # for each LDS section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
,vLinesXColorLDS=None
,vAreasXColorLDS=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# --- Args Fct. HYD ---:
,TCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,TCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,TCsOPCScenTimeShift=pd.Timedelta('1 hour')
,TCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,TCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,TCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,TCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={}
,pDct={}
,QDctOPC={}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={}
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
# --- Args Fct. LDS ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
,ylimAL=ylimALD
,yticksAL=yticksALD
,ylimR=ylimRD #can be a list #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False # can be a list #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD # can be a list of lists #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
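# plots the HYD sections (pltLDSpQAndEvents) in the upper row resp. left column and the
# LDS sections (pltLDSErgVec) in the lower row resp. right column, one subplot per xlim
# returns gsHYD, gsLDS, pltLDSpQAndEventsResults, pltLDSErgVecResults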
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
fig=plt.gcf()
if orientation=='landscape':
# oben HYD unten LDS
gsHYD = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.ncols)]
gsLDS = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.ncols)]
else:
# links HYD rechts LDS
gsHYD = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.nrows)]
gsLDS = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.nrows)]
pltLDSpQAndEventsResults=plotTimespansHYD(
axLst=axLstHYD
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitles
,sectionTexts=sectionTexts
,vLinesX=vLinesX
,hLinesY=hLinesY
,vAreasX=vAreasX
,vLinesXColor=vLinesXColor
,vAreasXColor=vAreasXColor
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfTCsLDSIn=TCsLDSIn
,dfTCsOPC=TCsOPC
,dfTCsOPCScenTimeShift=TCsOPCScenTimeShift
,dfTCsSIDEvents=TCsSIDEvents
,dfTCsSIDEventsTimeShift=TCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=TCsSIDEventsInXlimOnly
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
,yGridSteps=yGridSteps
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
)
if orientation=='landscape':
# oben HYD unten LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileOben)
else:
# links HYD rechts LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteLinks)
if sectionTitlesLDS==None:
sectionTitlesLDS=sectionTitles
if sectionTextsLDS==None:
sectionTextsLDS=sectionTexts
if vLinesXLDS==None:
vLinesXLDS=vLinesX
if vAreasXLDS==None:
vAreasXLDS=vAreasX
if vLinesXColorLDS==None:
vLinesXColorLDS=vLinesXColor
if vAreasXColorLDS==None:
vAreasXColorLDS=vAreasXColor
pltLDSErgVecResults=plotTimespansLDS(
axLst=axLstLDS
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitlesLDS
,sectionTexts=sectionTextsLDS
,vLinesX=vLinesXLDS
,vAreasX=vAreasXLDS
,vLinesXColor=vLinesXColorLDS
,vAreasXColor=vAreasXColorLDS
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,ylimR=ylimR
,ylimRxlim=ylimRxlim
,yticksR=yticksR
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
# wenn weniger als 5 Achsen geplottet werden stimmt der erste Wert von rectSpalteRechts nicht
#(axes,lines)=pltLDSErgVecResults[0]
#
# numOfYAxes=len(axes)
#corFac=5-numOfYAxes
#rectSpalteRechtsCor=rectSpalteRechts #[0.325, 0, 1, 1]
#rectSpalteRechtsCor[0]=rectSpalteRechtsCor[0]+0.06*corFac
if orientation=='landscape':
# oben HYD unten LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileUnten)
else:
# links HYD rechts LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteRechts)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults
def plotTimespansHYD(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,hLinesY=[] # plotted in each section
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfTCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={ # Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSrc 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None#[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSpQAndEvents selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
# plots pltLDSpQAndEvents-Sections
# returns a Lst of pltLDSpQAndEvents-Results, a Lst of (axes,lines,scatters)
try:
if sectionTitles==[] or sectionTitles==None:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSpQAndEventsResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
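# note: dateFormat may be a list with one entry per section; bysecond/byminute/byhour
# may be lists of lists (one list per section) - a plain list is applied to all sections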
(axes,lines,scatters)=pltLDSpQAndEvents(
ax
,dfTCsLDSIn=dfTCsLDSIn
,dfTCsOPC=dfTCsOPC
,dfTCsOPCScenTimeShift=dfTCsOPCScenTimeShift
,dfTCsSIDEvents=dfTCsSIDEvents
,dfTCsSIDEventsTimeShift=dfTCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=dfTCsSIDEventsInXlimOnly
,dfTCsSIDEventsyOffset=dfTCsSIDEventsyOffset
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,xlim=xlim
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
# 3. Achse
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
,yGridSteps=yGridSteps
,plotLegend=plotLegendFct
,baseColorsDef=baseColorsDef
)
pltLDSpQAndEventsResults.append((axes,lines,scatters))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
for hLineY in hLinesY:
ax.axhline(y=hLineY,xmin=0, xmax=1,color='gray',ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly:
legendHorizontalPos='center' # wenn nur 1x Legende dann Mitte
if plotLegend1stOnly and idx>0:
pass
else:
patternBCp='^p S(?:rc|nk)'
patternBCQ='^Q S(?:rc|nk)'
patternBCpQ='^[pQ] S(?:rc|nk)'
linesp=[line for line in lines if re.search(patternBCp,line) != None]
linesQ=[line for line in lines if re.search(patternBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patternBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSpQAndEventsResults
def plotTimespansLDS(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
#,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD
,yTwinedAxesPosDeltaHPStart=-0.0125
,yTwinedAxesPosDeltaHP=-0.0875
,ylimR=ylimRD # can be a list
,ylimRxlim=False # can be a list
,yticksR=yticksRD # can be a list
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
# plots pltLDSErgVec-Sections
# returns a Lst of pltLDSErgVec-Results, a Lst of (axes,lines)
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if sectionTitles==[] or sectionTitles ==None:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSErgVecResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
ylimRIdx=ylimR
if isinstance(ylimR, list):
ylimRIdx=ylimR[idx]
ylimRxlimIdx=ylimRxlim
if isinstance(ylimRxlim, list):
ylimRxlimIdx=ylimRxlim[idx]
yticksRIdx=yticksR
if isinstance(yticksR, list):
if any(isinstance(el, list) for el in yticksR):
yticksRIdx=yticksR[idx]
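# note: ylimR/ylimRxlim may also be given per section as lists, yticksR as a list of lists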
(axes,lines)=pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,xlim=xlims[idx]
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,ylimAL=ylimAL
,yticksAL=yticksAL
,yTwinedAxesPosDeltaHPStart=yTwinedAxesPosDeltaHPStart
,yTwinedAxesPosDeltaHP=yTwinedAxesPosDeltaHP
,ylimR=ylimRIdx
,ylimRxlim=ylimRxlimIdx
,yticksR=yticksRIdx
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,ySpanMin=ySpanMin
,plotLegend=plotLegendFct
,legendLoc=legendLoc
,legendFramealpha=legendFramealpha
,legendFacecolor=legendFacecolor
,attrsDctLDS=attrsDctLDS
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,plotACCLimits=plotACCLimits
,highlightAreas=highlightAreas
,Seg_Highlight_Color=Seg_Highlight_Color
,Seg_Highlight_Alpha=Seg_Highlight_Alpha
,Seg_Highlight_Fct=Seg_Highlight_Fct
,Seg_HighlightError_Color=Seg_HighlightError_Color
,Seg_Highlight_Alpha_Error=Seg_Highlight_Alpha_Error #
,Seg_HighlightError_Fct=Seg_HighlightError_Fct
,Druck_Highlight_Color=Druck_Highlight_Color
,Druck_Highlight_Alpha=Druck_Highlight_Alpha
,Druck_Highlight_Fct=Druck_Highlight_Fct
,Druck_HighlightError_Color=Druck_HighlightError_Color
,Druck_Highlight_Alpha_Error=Druck_Highlight_Alpha_Error #
,Druck_HighlightError_Fct=Druck_HighlightError_Fct
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
pltLDSErgVecResults.append((axes,lines))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly and idx>0:
pass
else:
if not dfSegReprVec.empty:
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternSeg,line) != None])
,tuple([line for line in lines if re.search(patternSeg,line) != None])
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternDruck,line) != None])
,tuple([line for line in lines if re.search(patternDruck,line) != None])
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSErgVecResults
def pltLDSpQAndEvents(
ax
,dfTCsLDSIn # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorgenannten Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame()
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={# Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSnk 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True # plot RTTM-Echoes
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None #wenn undef., dann aus ylimQ
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,ylabel3rd='Schieber (ZUSTände 0,1,2 jew. + x; Befehle)'
,yGridSteps=30 # 0: das y-Gitter besteht dann bei ylimp=ylimQ=yticksp=yticksQ None nur aus min/max (also 1 Gitterabschnitt)
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
"""
zeichnet pq-Zeitkurven - ggf. ergaenzt durch Events
Returns:
* axes (Dct of axes)
* lines (Dct of lines)
* scatters (List of ax.scatter-Results)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
lines={}
scatters=[]
try:
axes['p']=ax
# x-Achse ----------------
if xlim == None:
xlimMin=dfTCsLDSIn.index[0]
xlimMax=dfTCsLDSIn.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}dfTCsOPCScenTimeShift: {1:s}".format(logStr,str(dfTCsOPCScenTimeShift)))
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# Eindeutigkeit der IDPlts pruefen
keys=[]
keysUneindeutig=[]
for dct in [QDct,pDct,QDctOPC,pDctOPC]:
for key, value in dct.items():
if IDPltKey in value.keys():
IDPltValue=value[IDPltKey]
if IDPltValue in keys:
print("IDPlt {:s} bereits vergeben".format(IDPltValue))
keysUneindeutig.append(IDPltValue)
else:
keys.append(IDPltValue)
# 1. Achse p -----------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p'))
for key, value in pDct.items(): # nur die konfigurierten IDs plotten
if key in dfTCsLDSIn.columns: # nur dann, wenn ID als Spalte enthalten
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=key # Spaltenname
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey=IDPltKey # Schluesselbezeichner in value
,IDPltValuePostfix=None
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('1 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys():
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p OPC'))
for key, value in pDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
ylimp,yticksp=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,pDct.keys()
,ylim=ylimp
,yticks=yticksp
,ylimxlim=ylimpxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax.set_ylim(ylimp)
ax.set_yticks(yticksp)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel(ylabelp)
# 2. y-Achse Q ----------------------------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q'))
ax2 = ax.twinx()
axes['Q']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
for key, value in QDct.items():
if key in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=key
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
# ,timeShift=pd.Timedelta('0 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys() and plotRTTM:
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q OPC'))
for key, value in QDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
pltLDSHelperY(ax2)
ylimQ,yticksQ=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,QDct.keys()
,ylim=ylimQ
,yticks=yticksQ
,ylimxlim=ylimQxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax2.set_ylim(ylimQ)
ax2.set_yticks(yticksQ)
ax2.grid()
ax2.set_ylabel(ylabelQ)
# ggf. 3. Achse
if not dfTCsSIDEvents.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 3. Achse SID'))
ax3 = ax.twinx()
axes['SID']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
if dfTCsSIDEventsInXlimOnly:
# auf xlim beschränken
dfTCsSIDEventsPlot=dfTCsSIDEvents[
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
# weiter beschränken auf die, die in xlim mind. 1 Eintrag haben
dfTCsSIDEventsPlot=dfTCsSIDEventsPlot.dropna(axis=1,how='all')
else:
dfTCsSIDEventsPlot=dfTCsSIDEvents
# doppelte bzw. mehrfache Spaltennamen eliminieren (das waere ein Aufruf-Fehler)
dfTCsSIDEventsPlot = dfTCsSIDEventsPlot.loc[:,~dfTCsSIDEventsPlot.columns.duplicated()]
logger.debug("{:s}dfTCsSIDEventsPlot.dropna(how='all'): {:s}".format(logStr,dfTCsSIDEventsPlot.dropna(how='all').to_string()))
if not dfTCsSIDEventsPlot.dropna(how='all').empty: # mind. 1 Ereignis in irgendeiner Spalte muss ueberbleiben
# aus Performanzgruenden wird nur zum Plot gegeben, was in xlim auch zu sehen sein wird
dfTCsSIDEventsPlot2=dfTCsSIDEventsPlot[
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
labelsOneCall,scattersOneCall=pltLDSSIDHelper(
ax3
,dfTCsSIDEventsPlot2
,dfTCsSIDEventsTimeShift
,dfTCsSIDEventsyOffset
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
)
scatters=scatters+scattersOneCall
pltLDSHelperY(ax3)
ax3.set_ylim(ylim3rd)
ax3.set_yticks(yticks3rd)
ax3.set_ylabel(ylabel3rd)
if plotLegend:
legendHorizontalPos='center'
patterBCp='^p S(rc|nk)'
patterBCQ='^Q S(rc|nk)'
patterBCpQ='^[pQ] S(rc|nk)'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,lines,scatters
def pltLDSErgVec(
,ax=None # Axes, auf die geplottet werden soll (und aus der neue Axes ge-twinx-ed werden); plt.gcf().gca() wenn undef.
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=None # tuple (xmin,xmax); wenn undef. gelten min/max aus vorgenannten Daten als xlim; wenn Seg angegeben, gilt Seg
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD #[0,10,20,30,40]
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ylimR=ylimRD #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# AC (Beschleunigung)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9 # wenn ylim R/AC undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
"""
zeichnet Zeitkurven von App LDS Ergebnisvektoren auf ax
return: axes (Dct der Achsen), yLines (Dct der Linien)
Dct der Achsen: 'A': Alarm etc.; 'R': m3/h; 'a': ACC; 'TV': Timer und Leckvolumen
#! Lücken (nicht plotten) wenn keine Zeiten
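Example (sketch; dfSeg is a placeholder name for a SEG-Ergebnisvektor with a DatetimeIndex and
columns such as AL_S, SB_S, MZ_AV, LR_AV, NG_AV, QM_AV, LP_AV):
    fig, ax = plt.subplots()
    axes, yLines = pltLDSErgVec(ax
        ,dfSegReprVec=dfSeg
        ,dfDruckReprVec=pd.DataFrame()
        ,highlightAreas=False
        ,plotAC=False
        ,plotTV=False)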
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
yLines={}
try:
if dfSegReprVec.empty and dfDruckReprVec.empty:
logger.error("{0:s}{1:s}".format(logStr,'dfSegReprVec UND dfDruckReprVec leer?! Return.'))
return
if not dfSegReprVec.empty:
# keine komplett leeren Zeilen
dfSegReprVec=dfSegReprVec[~dfSegReprVec.isnull().all(1)]
# keine doppelten Indices
dfSegReprVec=dfSegReprVec[~dfSegReprVec.index.duplicated(keep='last')] # dfSegReprVec.groupby(dfSegReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if not dfDruckReprVec.empty:
# keine komplett leeren Zeilen
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.isnull().all(1)]
# keine doppelten Indices
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.index.duplicated(keep='last')] # dfDruckReprVec.groupby(dfDruckReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if ax==None:
ax=plt.gcf().gca()
axes['A']=ax
# x-Achse ----------------
if xlim == None:
if not dfSegReprVec.empty:
xlimMin=dfSegReprVec.index[0]
xlimMax=dfSegReprVec.index[-1]
elif not dfDruckReprVec.empty:
xlimMin=dfDruckReprVec.index[0]
xlimMax=dfDruckReprVec.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# 1. Achse Alarm -----------------------
if not dfSegReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha, color=Seg_Highlight_Color)
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha_Error, color=Seg_HighlightError_Color)
if not dfDruckReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha, color=Druck_Highlight_Color)
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha_Error, color=Druck_HighlightError_Color)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'AL_S',attrsDctLDS['Seg_AL_S_Attrs'])
yLines['AL_S Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'AL_S',attrsDctLDS['Druck_AL_S_Attrs'])
yLines['AL_S Drk']=lines[0]
if not dfSegReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'SB_S',attrsDctLDS['Seg_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Seg']=lines[0]
if not dfDruckReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'SB_S',attrsDctLDS['Druck_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Drk']=lines[0]
ax.set_ylim(ylimAL)
ax.set_yticks(yticksAL)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel('A [0/10/20] u. 10x B [0/1/2/3/4]')
# 2. y-Achse R ----------------------------------------
ax2 = ax.twinx()
axes['R']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax2)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'MZ_AV',attrsDctLDS['Seg_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LR_AV',attrsDctLDS['Seg_LR_AV_Attrs'])
yLines['LR_AV (R2) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'NG_AV',attrsDctLDS['Seg_NG_AV_Attrs'])
yLines['NG_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'QM_AV',attrsDctLDS['Seg_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Seg']=lines[0]
if plotLPRate:
# R2 = R1 - LP
# R2 - R1 = -LP
# LP = R1 - R2
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LP_AV',attrsDctLDS['Seg_LP_AV_Attrs'])
yLines['LP_AV Seg']=lines[0]
if plotR2FillSeg:
df=dfSegReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.2)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'MZ_AV',attrsDctLDS['Druck_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LR_AV',attrsDctLDS['Druck_LR_AV_Attrs'])
yLines['LR_AV (R2) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'NG_AV',attrsDctLDS['Druck_NG_AV_Attrs'])
yLines['NG_AV Drk']=lines[0]
if plotLPRate:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LP_AV',attrsDctLDS['Druck_LP_AV_Attrs'])
yLines['LP_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'QM_AV',attrsDctLDS['Druck_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Drk']=lines[0]
if plotR2FillDruck:
df=dfDruckReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.4)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRSeg: {1:s} yticksRSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRDrk: {1:s} yticksRDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimR=ylimSeg
yticksR=yticksSeg
else:
ylimR=ylimDrk
yticksR=yticksDrk
logger.debug("{0:s}ylimR: {1:s} yticksR: {2:s}".format(logStr,str(ylimR),str(yticksR)))
ax2.set_ylim(ylimR)
ax2.set_yticks(yticksR)
ax2.grid()
ax2.set_ylabel('R1, R2, NG, LP (R1-R2), QM 1.6% [Nm³/h]')
# 3. y-Achse Beschleunigung ----------------------------------------
if plotAC:
# 3. y-Achse Beschleunigung -------------------------------------------------
ax3 = ax.twinx()
axes['a']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax3)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'AC_AV',attrsDctLDS['Seg_AC_AV_Attrs'])
yLines['AC_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'ACF_AV',attrsDctLDS['Seg_ACF_AV_Attrs'])
yLines['ACF_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'AC_AV',attrsDctLDS['Druck_AC_AV_Attrs'])
yLines['AC_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'ACF_AV',attrsDctLDS['Druck_ACF_AV_Attrs'])
yLines['ACF_AV Drk']=lines[0]
# ACC Limits
if plotACCLimits:
if not dfSegReprVec.empty:
# +
line=ax3.axhline(y=dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# +
line=ax3.axhline(y=dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfSegReprVec.empty:
# -
line=ax3.axhline(y=-dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# -
line=ax3.axhline(y=-dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACSeg: {1:s} yticksACSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACDrk: {1:s} yticksACDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimAC=ylimSeg
yticksAC=yticksSeg
else:
ylimAC=ylimDrk
yticksAC=yticksDrk
logger.debug("{0:s}ylimAC: {1:s} yticksAC: {2:s}".format(logStr,str(ylimAC),str(yticksAC)))
ax3.set_ylim(ylimAC)
ax3.set_yticks(yticksAC)
ax3.set_ylabel('a [mm/s²]')
# 4. y-Achse Timer und Volumen ----------------------------------------
if plotTV:
# 4. y-Achse Timer und Volumen ----------------------------------------
ax4 = ax.twinx()
axes['TV']=ax4
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax4
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax4)
if not dfSegReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'TIMER_AV',attrsDctLDS['Seg_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Seg']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'AM_AV',attrsDctLDS['Seg_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'TIMER_AV',attrsDctLDS['Druck_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Drk']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'AM_AV',attrsDctLDS['Druck_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Drk']=lines[0]
if not dfSegReprVec.empty or not dfDruckReprVec.empty:
ax4.set_ylim(ylimTV)
ax4.set_yticks(yticksTV)
ax4.set_ylabel(plotTVAmLabel)
ax4.grid()
# 5. y-Achse DPDT ----------------------------------------
if plotDPDT and (not dfSegReprVec.empty or not dfDruckReprVec.empty):
# Min. ermitteln
DPDT_REF_MinSEG=0
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinSEG=s.min()
DPDT_REF_MinDruck=0
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinDruck=s.min()
DPDT_REF_Min=min(DPDT_REF_MinSEG,DPDT_REF_MinDruck)
if DPDT_REF_Min >= 0:
pass # es gibt nichts zu plotten
else:
# Max ermitteln
maxSeg=DPDT_REF_Min
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxSeg=s.max()
maxDruck=DPDT_REF_Min
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxDruck=s.max()
DPDT_REF_Max=max(maxSeg,maxDruck)
# 5. y-Achse DPDT ----------------------------------------
ax5 = ax.twinx()
axes['DPDT']=ax5
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
if plotTV:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax5
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax5)
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfSegReprVec,'DPDT_REF',attrsDctLDS['Seg_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Seg']=lines[0]
df=dfSegReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Seg_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfDruckReprVec,'DPDT_REF',attrsDctLDS['Druck_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Drk']=lines[0]
df=dfDruckReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Druck_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Drk']=lines[0]
yTickList=[DPDT_REF_Min*10
,DPDT_REF_Min/0.9 # bei einem Vorhaltemaß von 0.9 steht hier x; 0.9*x kann man an den anderen beiden Ticks ablesen
#,DPDT_REF_Min
,0
,DPDT_REF_Min*-1
]
ax5.set_ylim(yTickList[0],yTickList[-1])
ax5.set_yticks([round(yTick,2) for yTick in yTickList])
if DPDT_REF_Max > DPDT_REF_Min:
ax5.set_ylabel("bar/Minute (max. Wert: {:6.3f})".format(DPDT_REF_Max))
else:
ax5.set_ylabel('bar/Minute')
ax5.grid()
if plotLegend:
legendHorizontalPos='center'
if not dfSegReprVec.empty:
if dfDruckReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='upper '+legendHorizontalPos # beide: fix
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternSeg,line) != None])
,tuple([line for line in yLines if re.search(patternSeg,line) != None])
,loc=loc #'upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
if dfSegReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='lower '+legendHorizontalPos # beide: fix
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternDruck,line) != None])
,tuple([line for line in yLines if re.search(patternDruck,line) != None])
,loc=loc #'lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,yLines
def pltHelperX(
ax
,dateFormat='%d.%m.%y: %H:%M:%S'
,bysecond=None # [0,15,30,45]
,byminute=None
,byhour=None
,yPos=-0.0125 #: (i.d.R. negativer) Abstand der y-Achse von der Zeichenfläche; default: -0.0125
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
if bysecond != None:
majLocatorTmp=mdates.SecondLocator(bysecond=bysecond)
elif byminute != None:
majLocatorTmp=mdates.MinuteLocator(byminute=byminute)
elif byhour != None:
majLocatorTmp=mdates.HourLocator(byhour=byhour)
else:
majLocatorTmp=mdates.HourLocator(byhour=[0,12])
majFormatterTmp=mdates.DateFormatter(dateFormat)
logger.debug("{0:s}ax.xaxis.set_major_locator ...".format(logStr))
ax.xaxis.set_major_locator(majLocatorTmp)
logger.debug("{0:s}ax.xaxis.set_major_formatter ...".format(logStr))
ax.xaxis.set_major_formatter(majFormatterTmp)
#logger.debug("{0:s}ax.get_xticks(): {1:s}".format(logStr,str(ax.get_xticks())))
logger.debug("{0:s}setp(ax.xaxis.get_majorticklabels() ...".format(logStr))
dummy=plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.spines["left"].set_position(("axes",yPos))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltLDSHelperY(
ax
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
pltMakePatchSpinesInvisible(ax)
ax.spines['left'].set_visible(True)
ax.yaxis.set_label_position('left')
ax.yaxis.set_ticks_position('left')
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltLDSErgVecHelperYLimAndTicks(
dfReprVec
,dfReprVecCol
,ylim=None #(-10,10) # wenn undef., dann min/max dfReprVec
,yticks=None #[-10,0,10] # wenn undef., dann aus dem Ergebnis von ylim
,ylimxlim=False #wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
):
"""
Returns: ylim,yticks
Der y-Werte-Bereich ylim wird zur x-Achse symmetrisch ermittelt.
yticks spielt dabei keine Rolle.
Sind ylim bzw. yticks definiert, erfahren sie keine Änderung.
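Worked example (illustrative numbers): for column values spanning -2.3 ... 4.1 the rounded
bounds are -3 and 5, the symmetric bound is ylimB=max(3,5)=5, hence ylim=(-5,5) and
(yticks undefined) yticks=[-5,0,5].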
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
if not dfReprVec.empty and not dfReprVec.loc[:,dfReprVecCol].isnull().all().all():
if not ylimxlim:
ylimmin=dfReprVec.loc[:,dfReprVecCol].min()
ylimmax=dfReprVec.loc[:,dfReprVecCol].max()
else:
(xlimMin,xlimMax)=xlim
if not dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].isnull().all().all():
ylimmin=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].min()
ylimmax=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].max()
else:
ylimmin=0
ylimmax=0
ylimminR=round(ylimmin,0)
ylimmaxR=round(ylimmax,0)
if ylimminR > ylimmin:
ylimminR=ylimminR-1
if ylimmaxR < ylimmax:
ylimmaxR=ylimmaxR+1
ylimminAbsR=math.fabs(ylimminR)
# B auf den extremaleren Wert
ylimB=max(ylimminAbsR,ylimmaxR)
if ylimB < ySpanMin:
# B auf Mindestwert
ylimB=ySpanMin
## Differenz < Mindestwert: B+
#if math.fabs(ylimmax-ylimmin) < ySpanMin:
# ylimB=.5*(ylimminAbs+ylimmax)+ySpanMin
ylim=(-ylimB,ylimB)
else:
ylim=(-ySpanMin,ySpanMin)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
yticks=[ylimMin,0,ylimMax]
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
def pltLDSpQHelperYLimAndTicks(
dfReprVec
,dfReprVecCols
,ylim=None # wenn undef., dann min/max dfReprVec
,yticks=None # wenn undef., dann aus ylimR
,ylimxlim=False # wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None # x-Wertebereich
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,yGridSteps=yGridStepsD
):
"""
Returns: ylim,yticks
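Worked example (illustrative numbers): if the selected columns span 1.2 ... 3.4 and
yGridSteps=3, rounding gives ylim=(1.0,4.0), dYGrid=1.0 and yticks=[1.0,2.0,3.0,4.0].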
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
df=dfReprVec.loc[:,[col for col in dfReprVecCols]]
if not ylimxlim:
# Extremalwerte Analysebereich
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
if xlim == None:
logger.error("{0:s} xlim muss angegeben sein wenn ylimxlim Wahr gesetzt wird. Weiter mit ylimxlim Falsch.".format(logStr))
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
# Extremalwerte x-Wertebereich
(xlimMin,xlimMax)=xlim
# Extremalwerte Analysebereich
ylimmin=df.loc[xlimMin:xlimMax,:].min().min()
ylimmax=df.loc[xlimMin:xlimMax,:].max().max()
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
if math.fabs(ylimmax-ylimmin) < ySpanMin:
ylimmax=ylimmin+ySpanMin
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
ylimMinR=round(ylimmin,0)
ylimMaxR=round(ylimmax,0)
if ylimMinR>ylimmin:
ylimMinR=ylimMinR-1
if ylimMaxR<ylimmax:
ylimMaxR=ylimMaxR+1
logger.debug("{0:s} ylimMinR={1:10.2f} ylimMaxR={2:10.2f}.".format(logStr,ylimMinR,ylimMaxR))
ylim=(ylimMinR,ylimMaxR)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
if yGridSteps==0:
yticks=[ylimMin,ylimMax]
else:
dYGrid=(ylimMax-ylimMin)/yGridSteps
y=np.arange(ylimMin,ylimMax,dYGrid)
if y[-1]<ylimMax:
y=np.append(y,y[-1]+dYGrid)
yticks=y
logger.debug("{0:s} yticks={1:s}.".format(logStr,str(yticks)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
def pltLDSErgVecHelper(
ax
,dfReprVec=pd.DataFrame()
,ID='AL_S' # Spaltenname in dfReprVec
,attrs={}
,fct=None # Function
):
"""
Helper
Returns:
lines: ax.plot-Ergebnis
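Example (sketch; the attrs dict is illustrative - 'where' triggers a step plot, all other
keys are applied via plt.setp):
    lines = pltLDSErgVecHelper(ax, dfSegReprVec, 'AL_S', {'color': 'red', 'where': 'post'})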
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lines=[]
label=ID
x=dfReprVec.index.values
if ID in dfReprVec.columns.to_list():
if fct==None:
y=dfReprVec[ID].values
else:
y=dfReprVec[ID].apply(fct).values
if 'where' in attrs.keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y,label=label
,where=attrs['where'])
else:
lines = ax.plot(x,y,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in attrs.items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
logger.warning("{0:s}Spalte: {1:s}: nicht vorhanden?!".format(logStr,ID))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return lines
def pltLDSpQHelper(
ax
,TCdf=pd.DataFrame()
,ID='' # Spaltenname
,xDctValue={} # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs={} # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey='IDPlt' # Schluesselbezeichner in xDctValue (Key in xDctAttrs und xDctFcts)
,IDPltValuePostfix=None # SchluesselPostfix in xDctAttrs und xDctFcts - i.e. ' RTTM'
,xDctFcts={} # a Dct with Fcts - i.e. {'p Src': lambda x: 134.969 + x*10^5/(794.*9.81)}
,timeShift=pd.Timedelta('0 seconds')
):
"""
Helper
Returns:
label: Bezeichner
lines: ax.plot-Ergebnis
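Example (sketch; the column name and the attribute dict are illustrative):
    label, lines = pltLDSpQHelper(ax
        ,TCdf=dfTCsLDSIn
        ,ID='FT_01.In.MW.value'
        ,xDctValue={'IDPlt': 'Q Src'}
        ,xDctAttrs={'Q Src': {'color': 'red'}}
        ,xDctFcts={})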
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{:s}Echo der Parameter: xDctFcts: {!s:s}".format(logStr,xDctFcts))
label=''
lines=[]
# nur Not Null plotten
s=TCdf[ID][TCdf[ID].notnull()]
logger.debug("{0:s}timeShift: {1:s}".format(logStr,str(timeShift)))
x=s.index.values+timeShift #TCdf.index.values+timeShift
IDPltValue=None
if IDPltKey in xDctValue.keys():
# es liegt ein Schluessel fuer eine Layout-Informationen vor
IDPltValue=xDctValue[IDPltKey] # koennte auch None sein ... {'IDPlt':None}
if IDPltValue != None and IDPltValuePostfix != None:
IDPltValue=IDPltValue+IDPltValuePostfix
if IDPltValue in xDctFcts.keys():
logger.debug("{:s}Fcts fuer: {:s}".format(logStr,IDPltValue))
fct=xDctFcts[IDPltValue]
y=s.apply(fct).values#TCdf[ID].apply(fct).values
else:
y=s.values #TCdf[ID].values
if IDPltValue != None:
label=IDPltValue+' '+ID
if IDPltValue in xDctAttrs.keys():
if 'where' in xDctAttrs[IDPltValue].keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y
,label=label
,where=xDctAttrs[IDPltValue]['where'])
else:
lines = ax.plot(x,y
,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in xDctAttrs[IDPltValue].items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
# es ist kein Layout definiert - einfach plotten
logger.debug("{0:s}IDPltValue: {1:s}: es ist kein Layout definiert - einfach plotten ...".format(logStr,IDPltValue))
lines = ax.plot(x,y
,label=label
)
else:
# es liegt kein Schluessel (oder Wert None) fuer eine Layout-Informationen vor - einfach plotten
label=ID
logger.debug("{0:s}ID: {1:s}: es liegt kein Schluessel (oder kein Wert) fuer eine Layout-Informationen vor - einfach plotten ...".format(logStr,ID))
lines = ax.plot(x,y)
logger.debug("{0:s}label: '{1:s}' len(lines): {2:d}".format(logStr,label,len(lines)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return label, lines
def pltLDSSIDHelper(
ax
,dfTCsSIDEvents
,dfTCsScenTimeShift
,dfTCsSIDEventsyOffset # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden); max. Erhöhung: 0.9
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
):
"""
Helper
Returns:
labels: Bezeichner
scatters: ax.scatter-Ergebnisse
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
markerDefSchieber=[ # Schiebersymobole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust (gefülltes "dickes" Kreuz)
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
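Example (sketch; mirrors the call from the pq time-curve plot above; the yOffset value 0.05 is illustrative):
    labels, scatters = pltLDSSIDHelper(ax3, dfTCsSIDEventsPlot2, dfTCsSIDEventsTimeShift, 0.05
        ,pSIDEvents, valRegExMiddleCmds, eventCCmds, eventCStats, markerDefSchieber, baseColorsSchieber)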
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
labels=[]
scatters=[]
# Anzahl der verschiedenen Schieber ermitteln
idxKat={}
idxSchieberLfd=0
for col in dfTCsSIDEvents.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
if valRegExSchieberID not in idxKat.keys():
idxKat[valRegExSchieberID]=idxSchieberLfd
idxSchieberLfd=idxSchieberLfd+1
logger.debug("{0:s}Dct idxKat: keys (versch. Schieber - meint versch. Kategorien): {1:s}: values (Index der jeweiligen Kategorie): {2:s}".format(logStr,str(idxKat.keys()),str(idxKat.values())))
dfTCsSIDEventsPlot = dfTCsSIDEvents # hier keine Veränderungen mehr
for col in dfTCsSIDEventsPlot.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
idxSchieberLfd=idxKat[valRegExSchieberID]
valRegExEventID=m.group('colRegExEventID')
valRegExMiddle=m.group('colRegExMiddle')
# Markersize
s=plt.rcParams['lines.markersize']**2
# Marker
if valRegExMiddle == valRegExMiddleCmds:
idxMarker=eventCCmds[valRegExEventID]
else:
idxMarker=eventCStats[valRegExEventID]
if valRegExEventID in ['In.ZUST']:
s=s*2.5
if idxMarker < len(markerDef):
m=markerDef[idxMarker]
else:
m=markerDef[-1]
logger.debug("{0:s}{1:s}: idxMarker: Soll: {2:d} MarkerIdx gewählt: {3:d}".format(logStr,col,idxMarker,len(markerDef)-1))
if idxSchieberLfd < len(baseColorsDef):
c=baseColorsDef[idxSchieberLfd]
else:
c=baseColorsDef[-1]
logger.debug("{0:s}{1:s}: idxSchieberLfd: Ist: {2:d} FarbenIdx gewählt: {3:d}".format(logStr,col,idxSchieberLfd,len(baseColorsDef)-1))
colors=[c for idx in range(len(dfTCsSIDEventsPlot.index))] # alle Ereignisse (der Spalte) haben dieselbe Farbe
label=col # alle Ereignisse (der Spalte) haben dasselbe Label
#sDefault=plt.rcParams['lines.markersize']**2
x=dfTCsSIDEventsPlot.index.values+dfTCsScenTimeShift
y=dfTCsSIDEventsPlot[col].values+min(idxSchieberLfd*dfTCsSIDEventsyOffset,.9)
logger.debug("{:s}{:s}: erste und letzte Werte: x:{!s:s} y:{!s:s}...".format(logStr,col,x[::len(x)-1],y[::len(y)-1]))
scatter = ax.scatter(
x
,y
,c=colors
,marker=m
,label=label
,s=s#Default
)
# scatter ist eine PathCollection; Attribut u.a. get_label(): Return the label used for this artist in the legend
# auch wenn y durchgehend Null wird ein scatter zurueckgegeben (d.h. ist ein Legendeneintrag vorhanden)
labels.append(label)
scatters.append(scatter)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return labels, scatters
# --- PLOT: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def pltMakeCategoricalCmap(baseColorsDef="tab10",catagoryColors=None,nOfSubCatsReq=3,reversedSubCatOrder=False):
"""
Returns a cmap with nOfCatsReq * nOfSubCatsReq discrete colors.
Parameter:
baseColorsDef: a (discrete) cmap defining the "base"colors
default: tab10
if baseColorsDef is not via get_cmap a matplotlib.colors.ListedColormap, baseColorsDef is interpreted via to_rgb as a list of colors
in this case catagoryColors is ignored
catagoryColors: a list of "base"color indices for this cmap
the length of the list is the number of categories requested: nOfCatsReq
the cmap's nOfColors must be >= nOfCatsReq
default: None (==> nOfCatsReq = cmap's nOfColors)
i.e. [2,8,3] for tab10 is green, yellow (ocher), red
nOfSubCatsReq: number of Subcategories requested
reversedSubCatOrder: False (default): if True, the last color of a category is from baseColorsDef
reversedSubCatOrder can be a list
Returns:
cmap with nOfCatsReq * nOfSubCatsReq discrete colors; None if an error occurs
one "base"color per category
nOfSubCatsReq "sub"colors per category
so each category consists of nOfSubCatsReq colors
Raises:
RmError
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import Rm
>>> Rm.pltMakeCategoricalCmap().N
30
>>> Rm.pltMakeCategoricalCmap(catagoryColors=[2,8,3]).N # 2 8 3 in tab10: grün gelb rot
9
>>> baseColorsDef="tab10"
>>> catagoryColors=[2,8,3]
>>> nOfSubCatsReq=4
>>> # grün gelb rot mit je 4 Farben von hell nach dunkel
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm.colors
array([[0.75 , 1. , 0.75 ],
[0.51819172, 0.87581699, 0.51819172],
[0.32570806, 0.75163399, 0.32570806],
[0.17254902, 0.62745098, 0.17254902],
[0.9983871 , 1. , 0.75 ],
[0.91113148, 0.91372549, 0.51165404],
[0.82408742, 0.82745098, 0.30609849],
[0.7372549 , 0.74117647, 0.13333333],
[1. , 0.75 , 0.75142857],
[0.94640523, 0.53069452, 0.53307001],
[0.89281046, 0.33167491, 0.3348814 ],
[0.83921569, 0.15294118, 0.15686275]])
>>> cm2=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=[False]+2*[True])
>>> cm.colors[nOfSubCatsReq-1]==cm2.colors[0]
array([ True, True, True])
>>> plt.close()
>>> size_DINA6quer=(5.8,4.1)
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm2,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via get_cmap) a matplotlib.colors.ListedColormap')
>>> #plt.show()
>>> cm3=Rm.pltMakeCategoricalCmap(baseColorsDef=['b','c','m'],nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm3.colors
array([[0.75 , 0.75 , 1. ],
[0.5 , 0.5 , 1. ],
[0.25 , 0.25 , 1. ],
[0. , 0. , 1. ],
[0.75 , 1. , 1. ],
[0.45833333, 0.91666667, 0.91666667],
[0.20833333, 0.83333333, 0.83333333],
[0. , 0.75 , 0.75 ],
[1. , 0.75 , 1. ],
[0.91666667, 0.45833333, 0.91666667],
[0.83333333, 0.20833333, 0.83333333],
[0.75 , 0. , 0.75 ]])
>>> plt.close()
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm3,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via to_rgb) a list of colors')
>>> #plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
cmap=None
try:
try:
# Farben, "base"colors, welche die cmap hat
nOfColors=plt.get_cmap(baseColorsDef).N
if catagoryColors==None:
catagoryColors=np.arange(nOfColors,dtype=int)
# verlangte Kategorien
nOfCatsReq=len(catagoryColors)
if nOfCatsReq > nOfColors:
logStrFinal="{0:s}: nOfCatsReq: {1:d} > cmap's nOfColors: {2:d}!".format(logStr,nOfCatsReq,nOfColors)
raise RmError(logStrFinal)
if max(catagoryColors) > nOfColors-1:
logStrFinal="{0:s}: max. Idx of catsReq: {1:d} > cmap's nOfColors-1: {2:d}!".format(logStr,max(catagoryColors),nOfColors-1)
raise RmError(logStrFinal)
# alle Farben holen, welche die cmap hat
ccolors = plt.get_cmap(baseColorsDef)(np.arange(nOfColors,dtype=int))
# die gewuenschten Kategorie"Basis"farben extrahieren
ccolors=[ccolors[idx] for idx in catagoryColors]
except:
listOfColors=baseColorsDef
nOfColors=len(listOfColors)
nOfCatsReq=nOfColors
ccolors=[]
for color in listOfColors:
ccolors.append(list(matplotlib.colors.to_rgb(color)))
finally:
pass
logger.debug("{0:s}ccolors: {1:s}".format(logStr,str(ccolors)))
logger.debug("{0:s}nOfCatsReq: {1:s}".format(logStr,str((nOfCatsReq))))
logger.debug("{0:s}nOfSubCatsReq: {1:s}".format(logStr,str((nOfSubCatsReq))))
# Farben bauen -------------------------------------
# resultierende Farben vorbelegen
cols = np.zeros((nOfCatsReq*nOfSubCatsReq, 3))
# ueber alle Kategoriefarben
if type(reversedSubCatOrder) is not list:
reversedSubCatOrderLst=nOfCatsReq*[reversedSubCatOrder]
else:
reversedSubCatOrderLst=reversedSubCatOrder
logger.debug("{0:s}reversedSubCatOrderLst: {1:s}".format(logStr,str((reversedSubCatOrderLst))))
for i, c in enumerate(ccolors):
rgb=pltMakeCategoricalColors(c,nOfSubColorsReq=nOfSubCatsReq,reversedOrder=reversedSubCatOrderLst[i])
cols[i*nOfSubCatsReq:(i+1)*nOfSubCatsReq,:] = rgb
cmap = matplotlib.colors.ListedColormap(cols)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cmap
def pltMakePatchSpinesInvisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
def pltHlpAlignMarker(marker,halign='center',valign='middle'):
"""
create markers with specified alignment.
Parameters
----------
marker : a valid marker specification.
See mpl.markers
halign : string, float {'left', 'center', 'right'}
Specifies the horizontal alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'center',
-1 is 'right', 1 is 'left').
valign : string, float {'top', 'middle', 'bottom'}
Specifies the vertical alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'middle',
-1 is 'top', 1 is 'bottom').
Returns
-------
marker_path : matplotlib.path.Path
A Path describing the marker vertices relative to the
plot target point at (0, 0).
Notes
-----
The returned Path can be passed directly as marker to ax.plot and ax.scatter, e.g.::
ax.plot(1, 1, marker=pltHlpAlignMarker('>', halign='left'))
"""
if isinstance(halign,str):
halign = {'right': -1.,
'middle': 0.,
'center': 0.,
'left': 1.,
}[halign]
if isinstance(valign,str):
valign = {'top': -1.,
'middle': 0.,
'center': 0.,
'bottom': 1.,
}[valign]
# Define the base marker
bm = markers.MarkerStyle(marker)
# Get the marker path and apply the marker transform to get the
# actual marker vertices (they should all be in a unit-square
# centered at (0, 0))
m_arr = bm.get_path().transformed(bm.get_transform()).vertices
# Shift the marker vertices for the specified alignment.
m_arr[:, 0] += halign / 2
m_arr[:, 1] += valign / 2
return Path(m_arr, bm.get_path().codes)
def pltNetFigAx(pDf,**kwds):
"""
Erzeugt eine für die Netzdarstellung verzerrungsfreie Axes-Instanz.
* verwendet gcf() (will return an existing figure if one is open, or it will make a new one if there is no active figure)
* an already existing figure might be created this way: fig=plt.figure(dpi=2*72,linewidth=1.)
* errechnet die verzerrungsfreie Darstellung unter Berücksichtigung einer zukünftigen horizontalen Farblegende
* erzeugt eine Axes-Instanz
* setzt Attribute der Axes-Instanz
* setzt Attribute der Figure-Instanz
Args:
pDf: dataFrame
Coordinates:
* pXCor_i: colName in pDf (default: 'pXCor_i'): x-Start Coordinate of all Edges to be plotted
* pYCor_i: colName in pDf (default: 'pYCor_i'): y-Start Coordinate of all Edges to be plotted
* pXCor_k: colName in pDf (default: 'pXCor_k'): x-End Coordinate of all Edges to be plotted
* pYCor_k: colName in pDf (default: 'pYCor_k'): y-End Coordinate of all Edges to be plotted
Colorlegend:
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetFigAx')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
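Example (sketch; pDf is assumed to carry the four coordinate columns named above):
    fig=plt.figure(dpi=2*72,linewidth=1.)
    pltNetFigAx(pDf,pltTitle='Netz',CBFraction=0.05,CBHpad=0.0275)
    ax=plt.gca()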
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Coordinates
if 'pXCor_i' not in keys:
kwds['pXCor_i']='pXCor_i'
if 'pYCor_i' not in keys:
kwds['pYCor_i']='pYCor_i'
if 'pXCor_k' not in keys:
kwds['pXCor_k']='pXCor_k'
if 'pYCor_k' not in keys:
kwds['pYCor_k']='pYCor_k'
# Colorlegend
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
# Figure
if 'pltTitle' not in keys:
kwds['pltTitle']='pltNetFigAx'
if 'figFrameon' not in keys:
kwds['figFrameon']=True
if 'figEdgecolor' not in keys:
kwds['figEdgecolor']='black'
if 'figFacecolor' not in keys:
kwds['figFacecolor']='white'
except:
pass
try:
dx=max(pDf[kwds['pXCor_i']].max(),pDf[kwds['pXCor_k']].max())
dy=max(pDf[kwds['pYCor_i']].max(),pDf[kwds['pYCor_k']].max())
# erf. Verhältnis bei verzerrungsfreier Darstellung
dydx=dy/dx
if(dydx>=1):
dxInch=DINA4_x # Hochformat
else:
dxInch=DINA4_y # Querformat
figwidth=dxInch
#verzerrungsfrei: Blattkoordinatenverhaeltnis = Weltkoordinatenverhaeltnis
factor=1-(kwds['CBFraction']+kwds['CBHpad'])
# verzerrungsfreie Darstellung sicherstellen
figheight=figwidth*dydx*factor
# Weltkoordinatenbereich
xlimLeft=0
ylimBottom=0
xlimRight=dx
ylimTop=dy
# plt.figure(dpi=, facecolor=, edgecolor=, linewidth=, frameon=True)
fig = plt.gcf() # This will return an existing figure if one is open, or it will make a new one if there is no active figure.
fig.set_figwidth(figwidth)
fig.set_figheight(figheight)
logger.debug("{:s}dx={:10.2f} dy={:10.2f}".format(logStr,dx,dy))
logger.debug("{:s}figwidth={:10.2f} figheight={:10.2f}".format(logStr,figwidth,figheight))
ax=plt.subplot()
ax.set_xlim(left=xlimLeft)
ax.set_ylim(bottom=ylimBottom)
ax.set_xlim(right=xlimRight)
ax.set_ylim(top=ylimTop)
xTicks=ax.get_xticks()
dxTick = xTicks[1]-xTicks[0]
yTicks=ax.set_yticks([idx*dxTick for idx in range(math.floor(dy/dxTick)+1)])
plt.title(kwds['pltTitle'])
fig.set_frameon(kwds['figFrameon'])
fig.set_edgecolor(kwds['figEdgecolor'])
fig.set_facecolor(kwds['figFacecolor'])
# https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
# Size in pts:
# the argument markersize in plot denotes the markersize (i.e. diameter) in points
# the argument s in scatter denotes the markersize**2 in points^2
# so a given plot-marker with markersize=x needs a scatter-marker with s=x**2 if the scatter-marker shall cover the same "area" in points^2
# the "area" of the scatter-marker is proportional to the s param
# What are points - pts:
# the standard size of points in matplotlib is 72 ppi
# 1 point is hence 1/72 inches (1 inch = 1 Zoll = 2.54 cm)
# 1 point = 0.352777.... mm
# points and pixels - px:
# 1 point = dpi/ppi
# the standard dpi in matplotlib is 100
# a scatter-marker whose "area" always covers 10 pixels:
# s=(10*ppi/dpi)**2
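# worked example (with the defaults named above, ppi=72 and dpi=100):
# s=(10*72/100)**2 = 51.84 pts^2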
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetNodes(pDf,**kwds):
"""
Scatters NODEs on gca().
Args:
pDf: dataFrame
NODE: Size (Attribute)
* pAttribute: colName (default: 'Attribute') in pDf
* pSizeFactor: (default: 1.)
* scatter Sy-Area in pts^2 = pSizeFactor * Attribute
NODE: Color (Measure)
* pMeasure: colName (default: 'Measure') in pDf
* pMeasureColorMap (default: plt.cm.autumn)
* pMeasureAlpha (default: 0.9)
* pMeasureClip (default: False)
* CBFixedLimits (default: True)
* CBFixedLimitLow (default: 0.)
* CBFixedLimitHigh (default: 1.)
NODE: 3Classes
* pMeasure3Classes (default: True)
* pMCategory: colName (default: 'MCategory') in pDf
* pMCatTopTxt (default: 'Top')
* pMCatMidTxt (default: 'Middle')
* pMCatBotTxt (default: 'Bottom')
* pMCatTopColor (default: 'palegreen')
* pMCatTopAlpha (default: 0.9)
* pMCatTopClip (default: False)
* pMCatMidColorMap (default: plt.cm.autumn)
* pMCatMidAlpha (default: 0.9)
* pMCatMidClip (default: False)
* pMCatBotColor (default: 'violet')
* pMCatBotAlpha (default: 0.9)
* pMCatBotClip (default: False)
NODE:
* pXCor: colName (default: 'pXCor_i') in pDf
* pYCor: colName (default: 'pYCor_i') in pDf
Returns:
(pcN, vmin, vmax)
* pcN: die mit Farbskala gezeichneten Symbole
* vmin/vmax: die für die Farbskala verwendeten Extremalwerte
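Example (sketch; pDf is assumed to carry pXCor_i/pYCor_i plus Attribute and Measure columns):
    pcN, vmin, vmax = pltNetNodes(pDf
        ,pMeasure3Classes=False
        ,pSizeFactor=10.
        ,CBFixedLimits=False)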
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# NODE: Size (Attribute)
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pSizeFactor' not in keys:
kwds['pSizeFactor']=1.
# NODE: Color (Measure)
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.autumn
if 'pMeasureAlpha' not in keys:
kwds['pMeasureAlpha']=0.9
if 'pMeasureClip' not in keys:
kwds['pMeasureClip']=False
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=True
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=0.
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=1.
# NODE: 3Classes
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=True
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopTxt' not in keys:
kwds['pMCatTopTxt']='Top'
if 'pMCatMidTxt' not in keys:
kwds['pMCatMidTxt']='Middle'
if 'pMCatBotTxt' not in keys:
kwds['pMCatBotTxt']='Bottom'
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatTopAlpha' not in keys:
kwds['pMCatTopAlpha']=0.9
if 'pMCatTopClip' not in keys:
kwds['pMCatTopClip']=False
if 'pMCatMidColorMap' not in keys:
kwds['pMCatMidColorMap']=plt.cm.autumn
if 'pMCatMidAlpha' not in keys:
kwds['pMCatMidAlpha']=0.9
if 'pMCatMidClip' not in keys:
kwds['pMCatMidClip']=False
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
if 'pMCatBotAlpha' not in keys:
kwds['pMCatBotAlpha']=0.9
if 'pMCatBotClip' not in keys:
kwds['pMCatBotClip']=False
# NODE:
if 'pXCor' not in keys:
kwds['pXCor']='pXCor_i'
if 'pYCor' not in keys:
kwds['pYCor']='pYCor_i'
except:
pass
try:
ax=plt.gca()
if kwds['pMeasure3Classes']:
pN_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopTxt'])]
pN_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidTxt'])]
pN_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotTxt'])]
pN_top_Anz,col=pN_top.shape
pN_mid_Anz,col=pN_mid.shape
pN_bot_Anz,col=pN_bot.shape
pcN_top=ax.scatter(
pN_top[kwds['pXCor']],pN_top[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_top[kwds['pAttribute']]
,color=kwds['pMCatTopColor']
,alpha=kwds['pMCatTopAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatTopClip'])
logger.debug("{:s}Anzahl mit fester Farbe Top gezeichneter Symbole={:d}".format(logStr,pN_top_Anz))
if not kwds['CBFixedLimits']:
vmin=pN_mid[kwds['pMeasure']].min()
vmax=pN_mid[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pN_mid[kwds['pXCor']],pN_mid[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_mid[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMCatMidColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pN_mid[kwds['pMeasure']]
,alpha=kwds['pMCatMidAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatMidClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_mid_Anz))
pcN_bot=ax.scatter(
pN_bot[kwds['pXCor']],pN_bot[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_bot[kwds['pAttribute']]
,color=kwds['pMCatBotColor']
,alpha=kwds['pMCatBotAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatBotClip'])
logger.debug("{:s}Anzahl mit fester Farbe Bot gezeichneter Symbole={:d}".format(logStr,pN_bot_Anz))
else:
pN_Anz,col=pDf.shape
if not kwds['CBFixedLimits']:
vmin=pDf[kwds['pMeasure']].min()
vmax=pDf[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pDf[kwds['pXCor']],pDf[kwds['pYCor']]
,s=kwds['pSizeFactor']*pDf[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMeasureColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pDf[kwds['pMeasure']]
,alpha=kwds['pMeasureAlpha']
,edgecolors='face'
,clip_on=kwds['pMeasureClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_Anz))
logger.debug("{:s}Farbskala vmin={:10.3f} Farbskala vmax={:10.3f}".format(logStr,vmin,vmax))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (pcN, vmin, vmax)
def pltNetPipes(pDf,**kwds):
"""
Plots Lines with Marker on gca().
Args:
pDf: dataFrame
PIPE-Line:
* pAttribute: column in pDf (default: 'Attribute')
* pAttributeLs (default: '-')
* pAttributeSizeFactor: plot linewidth in pts = pAttributeSizeFactor (default: 1.0) * Attribute
* pAttributeSizeMin (default: None): if set: use pAttributeSizeMin-Value as Attribute for LineSize if Attribute < pAttributeSizeMin
* pAttributeColorMap (default: plt.cm.binary)
* pAttributeColorMapUsageStart (default: 1./3; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE-Marker:
* pMeasure: column in pDf (default: 'Measure')
* pMeasureMarker (default: '.')
* pMeasureSizeFactor: plot markersize in pts = pMeasureSizeFactor (default: 1.0) * Measure
* pMeasureSizeMin (default: None): if set: use pMeasureSizeMin-Value as Measure for MarkerSize if Measure < pMeasureSizeMin
* pMeasureColorMap (default: plt.cm.cool)
* pMeasureColorMapUsageStart (default: 0.; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE:
* pWAYPXCors: column in pDf (default: 'pWAYPXCors')
* pWAYPYCors: column in pDf (default: 'pWAYPYCors')
* pClip (default: False)
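Example (sketch; pDf is assumed to carry pWAYPXCors/pWAYPYCors as per-pipe coordinate lists
plus Attribute and Measure columns):
    pltNetPipes(pDf
        ,pAttributeSizeFactor=2.
        ,pMeasureSizeFactor=5.
        ,pMeasureColorMapUsageStart=1./3.)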
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# PIPE-Line
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeSizeFactor' not in keys:
kwds['pAttributeSizeFactor']=1.
if 'pAttributeSizeMin' not in keys:
kwds['pAttributeSizeMin']=None
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.binary
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=1./3.
# PIPE-Marker
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureSizeFactor' not in keys:
kwds['pMeasureSizeFactor']=1.
if 'pMeasureSizeMin' not in keys:
kwds['pMeasureSizeMin']=None
if 'pMeasureMarker' not in keys:
kwds['pMeasureMarker']='.'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.cool
if 'pMeasureColorMapUsageStart' not in keys:
kwds['pMeasureColorMapUsageStart']=0.
# PIPE
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=False
except:
pass
try:
# Line
minLine=pDf[kwds['pAttribute']].min()
maxLine=pDf[kwds['pAttribute']].max()
logger.debug("{:s}minLine (Attribute): {:6.2f}".format(logStr,minLine))
logger.debug("{:s}maxLine (Attribute): {:6.2f}".format(logStr,maxLine))
normLine=colors.Normalize(minLine,maxLine)
usageLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageLineColor=kwds['pAttributeColorMap'](normLine(usageLineValue))
# Marker
minMarker=pDf[kwds['pMeasure']].min()
maxMarker=pDf[kwds['pMeasure']].max()
logger.debug("{:s}minMarker (Measure): {:6.2f}".format(logStr,minMarker))
logger.debug("{:s}maxMarker (Measure): {:6.2f}".format(logStr,maxMarker))
normMarker=colors.Normalize(minMarker,maxMarker)
usageMarkerValue=minMarker+kwds['pMeasureColorMapUsageStart']*(maxMarker-minMarker)
usageMarkerColor=kwds['pMeasureColorMap'](normMarker(usageMarkerValue))
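        # Worked example with assumed numbers (not taken from real data): for Attribute values
        # spanning minLine=0..maxLine=90 and pAttributeColorMapUsageStart=1/3, usageLineValue=30;
        # every line whose Attribute is below 30 is drawn in the color of 30, i.e. the lowest
        # third of the colormap is never used. The marker colors are clamped analogously via
        # pMeasureColorMapUsageStart.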
ax=plt.gca()
for xs,ys,vLine,vMarker in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']],pDf[kwds['pAttribute']],pDf[kwds['pMeasure']]):
if vLine >= usageLineValue:
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
else:
colorLine=usageLineColor
if vMarker >= usageMarkerValue:
colorMarker=kwds['pMeasureColorMap'](normMarker(vMarker))
else:
colorMarker=usageMarkerColor
linewidth=kwds['pAttributeSizeFactor']*vLine
if kwds['pAttributeSizeMin'] != None:
if vLine < kwds['pAttributeSizeMin']:
linewidth=kwds['pAttributeSizeFactor']*kwds['pAttributeSizeMin']
mSize=kwds['pMeasureSizeFactor']*vMarker
if kwds['pMeasureSizeMin'] != None:
if vMarker < kwds['pMeasureSizeMin']:
mSize=kwds['pMeasureSizeFactor']*kwds['pMeasureSizeMin']
pcLines=ax.plot(xs,ys
,color=colorLine
,linewidth=linewidth
,ls=kwds['pAttributeLs']
,marker=kwds['pMeasureMarker']
,mfc=colorMarker
,mec=colorMarker
,mfcalt=colorMarker
,mew=0
,ms=mSize #kwds['pMeasureSizeFactor']*vMarker
,markevery=[0,len(xs)-1]
,aa=True
,clip_on=kwds['pClip']
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetLegendColorbar(pc,pDf,**kwds):
"""
    Creates an Axes cax for the legend area from ax (=gca()) and draws the color legend (the color scale with all of its properties) on cax.
Args:
pc: (eingefaerbte) PathCollection (aus pltNetNodes); wird für die Erzeugung der Farbskala zwingend benoetigt
pDf: dataFrame (default: None)
Measure:
* pMeasure: colName in pDf (default: 'Measure')
* pMeasureInPerc: Measure wird interpretiert in Prozent [0-1] (default: True)
* pMeasure3Classes (default: False d.h. Measure wird nicht in 3 Klassen dargestellt)
CBFixedLimits (Ticks):
* CBFixedLimits (default: False d.h. Farbskala nach vorh. min./max. Wert)
* CBFixedLimitLow (default: .10)
* CBFixedLimitHigh (default: .95)
Label:
* pMeasureUNIT (default: '[]')
* pMeasureTYPE (default: '')
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: 0.3)
* CBAnchorHorizontal: horizontaler Fußpunkt der colorbar in Plot-% (default: 0.)
* CBAnchorVertical: vertikaler Fußpunkt der colorbar in Plot-% (default: 0.2)
Return:
cax
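        Illustrative sketch (not a doctest, not executed); pcN is assumed to be the
        PathCollection returned by pltNetNodes and pNodes the matching node DataFrame:
            cax = pltNetLegendColorbar(pcN, pNodes, pMeasureInPerc=False,
                                       pMeasureTYPE='p', pMeasureUNIT='[bar]')
            # colorbar labelled 'p in [bar]'; ticks at min, mid and max of pNodes['Measure'],
            # only the min and max ticks get a label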
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Measure
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureInPerc' not in keys:
kwds['pMeasureInPerc']=True
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=False
# Label
if 'pMeasureUNIT' not in keys:
kwds['pMeasureUNIT']='[]'
if 'pMeasureTYPE' not in keys:
kwds['pMeasureTYPE']=''
# CBFixedLimits
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=False
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=.10
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=.95
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
if 'CBLabelPad' not in keys:
kwds['CBLabelPad']=-50
if 'CBTicklabelsHPad' not in keys:
kwds['CBTicklabelsHPad']=0
if 'CBAspect' not in keys:
kwds['CBAspect']=10.
if 'CBShrink' not in keys:
kwds['CBShrink']=0.3
if 'CBAnchorHorizontal' not in keys:
kwds['CBAnchorHorizontal']=0.
if 'CBAnchorVertical' not in keys:
kwds['CBAnchorVertical']=0.2
except:
pass
try:
ax=plt.gca()
fig=plt.gcf()
# cax
cax=None
cax,kw=make_axes(ax
,location='right'
,fraction=kwds['CBFraction'] # fraction of original axes to use for colorbar
,pad=kwds['CBHpad'] # fraction of original axes between colorbar and new image axes
,anchor=(kwds['CBAnchorHorizontal'],kwds['CBAnchorVertical']) # the anchor point of the colorbar axes
,aspect=kwds['CBAspect'] # ratio of long to short dimension
,shrink=kwds['CBShrink'] # fraction by which to shrink the colorbar
)
# colorbar
colorBar=fig.colorbar(pc
,cax=cax
,**kw
)
# tick Values
if kwds['pMeasure3Classes']: # FixedLimits should be True and FixedLimitHigh/Low should be set ...
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
if kwds['CBFixedLimits'] and isinstance(kwds['CBFixedLimitHigh'],float) and isinstance(kwds['CBFixedLimitLow'],float):
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
minCBtickValue=pDf[kwds['pMeasure']].min()
maxCBtickValue=pDf[kwds['pMeasure']].max()
colorBar.set_ticks([minCBtickValue,minCBtickValue+.5*(maxCBtickValue-minCBtickValue),maxCBtickValue])
# tick Labels
if kwds['pMeasureInPerc']:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:3.0f}%".format(minCBtickValue*100)
maxCBtickLabel="<{:3.0f}%".format(maxCBtickValue*100)
else:
minCBtickLabel="{:6.2f}%".format(minCBtickValue*100)
maxCBtickLabel="{:6.2f}%".format(maxCBtickValue*100)
else:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:6.2f}".format(minCBtickValue)
maxCBtickLabel="<{:6.2f}".format(maxCBtickValue)
else:
minCBtickLabel="{:6.2f}".format(minCBtickValue)
maxCBtickLabel="{:6.2f}".format(maxCBtickValue)
logger.debug("{:s}minCBtickLabel={:s} maxCBtickLabel={:s}".format(logStr,minCBtickLabel,maxCBtickLabel))
colorBar.set_ticklabels([minCBtickLabel,'',maxCBtickLabel])
colorBar.ax.yaxis.set_tick_params(pad=kwds['CBTicklabelsHPad'])
# Label
if kwds['pMeasureInPerc']:
CBLabelText="{:s} in [%]".format(kwds['pMeasureTYPE'])
else:
CBLabelText="{:s} in {:s}".format(kwds['pMeasureTYPE'],kwds['pMeasureUNIT'])
colorBar.set_label(CBLabelText,labelpad=kwds['CBLabelPad'])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cax
def pltNetLegendColorbar3Classes(pDf,**kwds):
"""
    Draws the supplementary legend information for the 3-classes case on gca().
* scatters the Top-Symbol
* scatters the Bot-Symbol
* the "Mid-Symbol" is the (already existing) colorbar with (already existing) ticks and ticklabels
Args:
pDf: dataFrame
Category:
* pMCategory: colName in pDf (default: 'MCategory')
* pMCatTopText
* pMCatMidText
* pMCatBotText
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
Color:
* pMCatBotColor='violet'
* pMCatTopColor='palegreen'
Returns:
(bbTop, bbMid, bbBot): the boundingBoxes of the 3Classes-Symbols
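        Placement note: as documented above, the VPads are interpreted as y-positions relative
        to the colorbar axes (0 = bottom, 1 = top); with the defaults the Bot symbol therefore
        sits a quarter of the colorbar height below the bottom (0-1/4=-0.25), the invisible Mid
        reference at half height (0.5) and the Top symbol a quarter above the top (1+1/4=1.25).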
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Cats
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopText' not in keys:
kwds['pMCatTopText']='Top'
if 'pMCatMidText' not in keys:
kwds['pMCatMidText']='Middle'
if 'pMCatBotText' not in keys:
kwds['pMCatBotText']='Bottom'
# CBLegend3Cats
if 'CBLe3cTopVPad' not in keys:
kwds['CBLe3cTopVPad']=1+1*1/4
if 'CBLe3cMidVPad' not in keys:
kwds['CBLe3cMidVPad']=.5
if 'CBLe3cBotVPad' not in keys:
kwds['CBLe3cBotVPad']=0-1*1/4
if 'CBLe3cSySize' not in keys:
kwds['CBLe3cSySize']=10**2
if 'CBLe3cSyType' not in keys:
kwds['CBLe3cSyType']='o'
# CatAttribs
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
except:
pass
try:
cax=plt.gca()
        pDf_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopText'])]
        pDf_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidText'])]
        pDf_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotText'])]
pDf_top_Anz,col=pDf_top.shape
pDf_mid_Anz,col=pDf_mid.shape
pDf_bot_Anz,col=pDf_bot.shape
logger.debug("{:s} pDf_bot_Anz={:d} pDf_mid_Anz={:d} pDf_top_Anz={:d}".format(logStr,pDf_bot_Anz,pDf_mid_Anz,pDf_top_Anz))
logger.debug("{:s} CBLe3cBotVPad={:f} CBLe3cMidVPad={:f} CBLe3cTopVPad={:f}".format(logStr,kwds['CBLe3cBotVPad'],kwds['CBLe3cMidVPad'],kwds['CBLe3cTopVPad']))
bbBot=None
bbMid=None
bbTop=None
if pDf_bot_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cBotVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatBotColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
# Text dazu
o=po.findobj(match=None)
p=o[0]
bbBot=p.get_datalim(cax.transAxes)
logger.debug("{:s} bbBot={!s:s}".format(logStr,bbBot))
# a=plt.annotate(pMCatBotText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
# # weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_bot_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
if pDf_top_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cTopVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatTopColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbTop=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# bbTop=bb
# a=plt.annotate(pMCatTopText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_top_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad++CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
if pDf_mid_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cMidVPad']
,s=kwds['CBLe3cSySize']
,c='lightgrey'
,alpha=0.9
,edgecolors='face'
,clip_on=False
                        ,visible=False # only the coordinates are needed here
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbMid=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# a=plt.annotate(pMCatMidText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_mid_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (bbTop, bbMid, bbBot)
def pltNetLegendTitleblock(text='',**kwds):
"""
    Draws supplementary title block information on gca().
Args:
text
Parametrierung:
* anchorVertical
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'anchorVertical' not in keys:
kwds['anchorVertical']=1.
except:
pass
cax=plt.gca()
try:
a=plt.text( 0.
,kwds['anchorVertical']
,text
,transform=cax.transAxes
,family='monospace'
,size='smaller'
,rotation='vertical'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetTextblock(text='',**kwds):
"""
    Draws a text block on gca().
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'x' not in keys:
kwds['x']=0.
if 'y' not in keys:
kwds['y']=1.
except:
pass
ax=plt.gca()
try:
a=plt.text( kwds['x']
,kwds['y']
,text
,transform=ax.transAxes
,family='monospace'
,size='smaller'
,rotation='horizontal'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
class Rm():
@classmethod
def pltNetPipes(cls,pDf,**kwds):
"""
Plots colored PIPES.
Args:
DATA:
pDf: dataFrame
* query: query to filter pDf; default: None; Exp.: ="CONT_ID == '1001'"
* fmask: function to filter pDf; default: None; Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
* query and fmask are used both (query 1st) if not None
* sort_values_by: list of colNames defining the plot order; default: None (d.h. die Plotreihenfolge - und damit die z-Order - ist dann die pDF-Reihenfolge)
* sort_values_ascending; default: False (d.h. kleine zuletzt und damit (wenn pAttrLineSize = pAttribute/pAttributeFunc) auch dünne über dicke); nur relevant bei sort_values_by
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
Colorlegend:
* CBFraction in % (default: 5)
* CBHpad (default: 0.05)
* CBLabel (default: pAttribute/pAttributeFunc)
* CBBinTicks (default: None, d.h. keine Vorgabe von Außen); Vorgabe N: N yTicks; bei diskreten CM gemeint im Sinne von N-1 diskreten Kategorien
* CBBinDiscrete (default: False, d.h. eine gegebene (kontinuierliche) CM wird nicht in eine diskrete gewandelt)
* wenn CBBinDiscrete, dann gilt N aus CBBinTicks fuer die Ticks (bzw. Kategorien); ist CBBinTicks undef. gilt 4 (also 3 Kategorien)
* bei den vorgenannten Kategorien handelt es sich um eine gleichmäßige Unterteilung des definierten Wertebereiches
* CBBinBounds (default: None): wenn die CM eine diskrete ist, dann wird eine vorgegebene BoundaryNorm angewandt; CBBinTicks hat dann keine Bedeutung
* CBTicks: individuell vorgegebene Ticks; wird am Schluss prozessiert, d.h. vorh. (ggf. auch durch CBBinTicks bzw. <=/>= u. v=/^= bereits manipulierte) ...
* ... Ticks werden überschrieben; kann ohne CBTickLabels verwendet werden
* CBTickLabels: individuell vorgegebene Ticklabels; wird danach prozessiert; Länge muss zu dann existierenden Ticks passen; kann auch ohne CBTicks verwendet werden
PIPE-Attribute:
* pAttribute: column in pDf (default: 'Attribute')
* pAttributeFunc:
* function to be used to construct a new col to be plotted
* if pAttributeFunc is not None pAttribute is not used: pAttribute is set to 'pAttributeFunc'
* the new constructed col is named 'pAttributeFunc'; this name can be used in sort_values_by
PIPE-Color:
* pAttributeColorMap (default: plt.cm.cool)
* Farbskalamapping:
* ------------------
* pAttributeColorMapMin (default: pAttribute.min()); ordnet der kleinsten Farbe einen Wert zu; CM: wenn angegeben _und unterschritten: <=
* pAttributeColorMapMax (default: pAttribute.max()); ordnet der größten Farbe einen Wert zu; CM: wenn angegeben _und überschritten: >=
* Standard: Farbskala wird voll ausgenutzt; d.h. der (ggf. mit Min/Max) eingegrenzte Wertebereich wird den Randfarben der Skala zugeordnet
* wenn ein anderer, kleinerer, Wertebereich mit derselben Farbskala geplottet wird, dann sind die Farben in den Plots nicht vergleichbar ...
* ... wenn eine Farbvergleichbarkeit erzielt werden soll, darf dieselbe Farbskala nicht voll ausgenutzt werden
* pAttributeColorMapUsageStart (default: 0.; Wertebereich: [0,1[)
* hier: die Farbskala wird unten nur ab UsageStart genutzt ...
* ... d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart; CM: v=
* pAttributeColorMapUsageEnd (default: 1.; Wertebereich: ]0,1])
* hier: die Farbskala wird oben nur bis UsageEnd genutzt ...
* ... d.h. Werte die eine "größere" Farbe hätten, bekommen die Farbe von UsageEnd; CM: ^=
* etwas anderes ist es, wenn man eine Farbskala an den Rändern nicht voll ausnutzen möchte weil einem die Farben dort nicht gefallen ...
PIPE-Color 2nd:
* um "unwichtige" Bereiche zu "dimmen": Beispiele:
* räumlich: nicht-Schnitt Bereiche; Bestand (2nd) vs. Ausbau; Zonen unwichtig (2nd) vs. Zonen wichtig; Ok (2nd) von NOK
* es werden erst die 2nd-Color Pipes gezeichnet; die (1st-)Color Pipes werden danach gezeichnet, liegen also "über" den "unwichtigen"
* es wird dieselbe Spalte pAttribute/pAttributeFunc für die 2. Farbskala verwendet
* es wird derselbe Linienstil (pAttributeLs) für die 2. Farbskala verwendet
* es wird dieselbe Dicke pAttrLineSize (pAttribute/pAttributeFunc) für die 2. Farbskala verwendet
* nur die Farbskala ist anders sowie ggf. das Farbskalamapping
* pAttributeColorMapFmask: function to filter pDf to decide to plot with colorMap; default: =lambda row: True
* pAttributeColorMap2ndFmask: function to filter pDf to decide to plot with colorMap2nd; default: =lambda row: False
* mit den beiden Funktionsmasken kann eine Filterung zusätzlich zu query und fmask realisiert werden
* die Funktionsmasken sollten schnittmengenfrei sein; wenn nicht: 2nd überschreibt
* pAttributeColorMap2nd (default: plt.cm.binary)
* Farbskalamapping:
* ------------------
* pAttributeColorMap2ndMin (default: pAttributeColorMapMin)
* pAttributeColorMap2ndMax (default: pAttributeColorMapMax)
* die Farbskala wird an den Rändern nicht voll ausgenutzt wenn die Farben dort ggf. nicht gefallen:
* pAttributeColorMap2ndUsageStart (default: 0.; Wertebereich: [0,1[)
* pAttributeColorMap2ndUsageEnd (default: 1.; Wertebereich: ]0,1])
PIPE-Linestyle:
* pAttributeLs (default: '-')
                * same for all colors if multiple colors are specified
PIPE-Linesize:
* pAttrLineSize: column in pDf; if not specified: pAttribute/pAttributeFunc
* pAttrLineSizeFactor (>0): plot linewidth in pts = pAttrLineSizeFactor (default: =...) * fabs(pAttrLineSize)
* ...: 1./(pDf[pAttrLineSize].std()*2.)
                * same for all colors if multiple colors are specified
PIPE-Geometry:
* pWAYPXCors: column in pDf (default: 'pWAYPXCors')
* pWAYPYCors: column in pDf (default: 'pWAYPYCors')
* pClip (default: True)
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> # ---
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> #mx=mxs['DHNetwork']
>>> # ---
>>> plt.close()
>>> size_DINA3quer=(16.5, 11.7)
>>> dpiSize=72
>>> fig=plt.figure(figsize=size_DINA3quer,dpi=dpiSize)
>>> gs = gridspec.GridSpec(4, 2)
>>> # ---
>>> vROHR=xm.dataFrames['vROHR']
>>> # ---
>>> # Attribute (with neg. Values)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttribute='ROHR~*~*~*~QMAV'
... )
>>> txt=axNfd.set_title('RL QMAV')
>>> # ---
>>> # Function as Attribute
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[1])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs')
>>> # --------------------------
>>> # ---
>>> # Mi/MaD zS auf
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[2])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1600.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('Mi/MaD zS auf')
>>> # --------------------------
>>> # ---
>>> # ind. Kategorien
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[3])
>>> cm = matplotlib.colors.ListedColormap(['cyan', 'royalblue', 'magenta', 'coral'])
>>> cm.set_over('0.25')
>>> cm.set_under('0.75')
>>> bounds = [10.,100.,200.,800.,1600.]
>>> norm = matplotlib.colors.BoundaryNorm(bounds, cm.N)
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,CBBinBounds=bounds
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('ind. Kategorien')
>>> # --------------------------
>>> # ---
>>> # Unwichtiges ausblenden über 2nd Color
>>> # --------------------------
>>> vAGSN=xm.dataFrames['vAGSN']
>>> hpRL=vAGSN[(vAGSN['LFDNR']=='1') & (vAGSN['Layer']==2)]
>>> pDf=pd.merge(vROHR
... ,hpRL[hpRL.IptIdx=='S'] # wg. Innenpunkte
... ,how='left'
... ,left_on='pk'
... ,right_on='OBJID'
... ,suffixes=('','_AGSN')).filter(items=vROHR.columns.tolist()+['OBJID'])
>>> axNfd = fig.add_subplot(gs[4])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=7
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... )
>>> txt=axNfd.set_title('Unwichtiges ausblenden über 2nd Color')
>>> # --------------------------
>>> # ---
>>> # Farbskalen an den Rändern abschneiden
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[5])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,pAttributeColorMapUsageStart=3/15.
... ,pAttributeColorMapUsageEnd=12/15.
... )
>>> txt=axNfd.set_title('Farbskalen an den Rändern abschneiden')
>>> # --------------------------
>>> # ---
>>> # Farbskala diskretisieren
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[6])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinDiscrete=True
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,CBTicks=[250,750,1250]
... ,CBTickLabels=['klein','mittel','groß']
... )
>>> txt=axNfd.set_title('Farbskala diskretisieren')
>>> # --------------------------
>>> # ---
>>> # Unterkategorien
>>> # --------------------------
>>> baseColorsDef="tab10"
>>> catagoryColors=[9,6,1]
>>> nOfSubCatsReq=4
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> axNfd = fig.add_subplot(gs[7])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=16
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... )
>>> txt=axNfd.set_title('Unterkategorien')
>>> # --------------------------
>>> gs.tight_layout(fig)
>>> plt.show()
>>> plt.savefig('pltNetPipes.pdf',format='pdf',dpi=dpiSize*2)
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=5 # in %
if 'CBHpad' not in keys:
kwds['CBHpad']=0.05
if 'CBLabel' not in keys:
kwds['CBLabel']=None
# CB / Farbskala
if 'CBBinTicks' not in keys:
kwds['CBBinTicks']=None
if 'CBBinDiscrete' not in keys:
kwds['CBBinDiscrete']=False
if kwds['CBBinDiscrete']:
if kwds['CBBinTicks']==None:
kwds['CBBinTicks']=4 # (d.h. 3 Kategorien)
if 'CBBinBounds' not in keys:
kwds['CBBinBounds']=None
# customized yTicks
if 'CBTicks' not in keys:
kwds['CBTicks'] = None
if 'CBTickLabels' not in keys:
kwds['CBTickLabels'] = None
# DATA
if 'query' not in keys:
kwds['query']=None # Exp.: = "KVR_i=='2' & KVR_k=='2'"
if 'fmask' not in keys:
kwds['fmask']=None # Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
if 'sort_values_by' not in keys:
kwds['sort_values_by']=None
if 'sort_values_ascending' not in keys:
kwds['sort_values_ascending']=False
# PIPE-Attribute
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeFunc' not in keys:
logger.debug("{:s}pAttribute: not specified?! 'Attribute' will be used. pAttributeFunc is also not specified?!".format(logStr))
if 'pAttributeFunc' not in keys:
kwds['pAttributeFunc']=None
# PIPE-Color
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.cool
if 'pAttributeColorMapMin' not in keys:
kwds['pAttributeColorMapMin']=None
if 'pAttributeColorMapMax' not in keys:
kwds['pAttributeColorMapMax']=None
# Trunc Cmap
if 'pAttributeColorMapUsageStart' not in keys and 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapTrunc']=False
else:
kwds['pAttributeColorMapTrunc']=True
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=0.
if 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapUsageEnd']=1.
# PIPE-Color 1st/2nd - FMasks
if 'pAttributeColorMapFmask' not in keys:
kwds['pAttributeColorMapFmask']=lambda row: True
else:
logger.debug("{:s}Color 1st-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMapFmask'])))
if 'pAttributeColorMap2ndFmask' not in keys:
kwds['pAttributeColorMap2ndFmask']=lambda row: False
else:
logger.debug("{:s}Color 2nd-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMap2ndFmask'])))
# PIPE-Color 2nd
if 'pAttributeColorMap2nd' not in keys:
kwds['pAttributeColorMap2nd']=plt.cm.binary
if 'pAttributeColorMap2ndMin' not in keys:
kwds['pAttributeColorMap2ndMin']=kwds['pAttributeColorMapMin']
if 'pAttributeColorMap2ndMax' not in keys:
kwds['pAttributeColorMap2ndMax']=kwds['pAttributeColorMapMax']
# Trunc Cmap
if 'pAttributeColorMap2ndUsageStart' not in keys and 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndTrunc']=False
else:
kwds['pAttributeColorMap2ndTrunc']=True
if 'pAttributeColorMap2ndUsageStart' not in keys:
kwds['pAttributeColorMap2ndUsageStart']=0.
if 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndUsageEnd']=1.
# PIPE-Linestyle
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
# PIPE-Linesize
if 'pAttrLineSize' not in keys:
kwds['pAttrLineSize']=None
if 'pAttrLineSizeFactor' not in keys:
kwds['pAttrLineSizeFactor']=None
# PIPE-Geometry
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=True
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
try:
# ggf. filtern
if kwds['query'] != None:
logger.debug("{:s}pDf is filtered with query: {:s} ...".format(logStr,str(kwds['query'])))
pDf=pd.DataFrame(pDf.query(kwds['query']).values,columns=pDf.columns)
if kwds['fmask'] != None:
logger.debug("{:s}pDf is filtered with fmask: {:s} ...".format(logStr,str(kwds['fmask'])))
pDf=pd.DataFrame(pDf[pDf.apply(kwds['fmask'],axis=1)].values,columns=pDf.columns)
# ggf. zu plottende Spalte(n) neu ausrechnen bzw. Plotreihenfolge ändern: Kopie erstellen
if kwds['pAttributeFunc'] != None or kwds['sort_values_by'] != None:
# Kopie!
logger.debug("{:s}pDf is copied ...".format(logStr))
pDf=pDf.copy(deep=True)
# ggf. zu plottende Spalte(n) neu ausrechnen
if kwds['pAttributeFunc'] != None:
logger.debug("{:s}pAttribute: col '{:s}' is not used: ...".format(logStr,kwds['pAttribute']))
logger.debug("{:s}... pAttributeFunc {:s} is used to calculate a new col named 'pAttributeFunc'".format(logStr,str(kwds['pAttributeFunc'])))
pDf['pAttributeFunc']=pDf.apply(kwds['pAttributeFunc'],axis=1)
kwds['pAttribute']='pAttributeFunc'
logger.debug("{:s}col '{:s}' is used as Attribute.".format(logStr,kwds['pAttribute']))
# Label für CB
if kwds['CBLabel'] == None:
kwds['CBLabel']=kwds['pAttribute']
# Spalte für Liniendicke ermitteln
if kwds['pAttrLineSize'] == None:
kwds['pAttrLineSize']=kwds['pAttribute']
logger.debug("{:s}col '{:s}' is used as LineSize.".format(logStr,kwds['pAttrLineSize']))
# Liniendicke skalieren
if kwds['pAttrLineSizeFactor']==None:
kwds['pAttrLineSizeFactor']=1./(pDf[kwds['pAttrLineSize']].std()*2.)
logger.debug("{:s}Faktor Liniendicke: {:12.6f} - eine Linie mit Attributwert {:6.2f} wird in {:6.2f} Pts Dicke geplottet.".format(logStr
,kwds['pAttrLineSizeFactor']
,pDf[kwds['pAttrLineSize']].std()*2.
,kwds['pAttrLineSizeFactor']*pDf[kwds['pAttrLineSize']].std()*2.
))
logger.debug("{:s}min. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].min())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].min()))
)
logger.debug("{:s}max. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].max())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].max()))
)
# ggf. Plotreihenfolge ändern
if kwds['sort_values_by'] != None:
logger.debug("{:s}pDf is sorted (=Plotreihenfolge) by {:s} ascending={:s}.".format(logStr,str(kwds['sort_values_by']),str(kwds['sort_values_ascending'])))
pDf.sort_values(by=kwds['sort_values_by'],ascending=kwds['sort_values_ascending'],inplace=True)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y-Achsen: Lims ermitteln und setzen (das Setzen beeinflusst Ticks und data_ratio; ohne dieses Setzen wären diese auf Standardwerten)
# ----------------------------------------------------------------------------------------------------------------------------------------
xMin=923456789
yMin=923456789
xMax=0
yMax=0
for xs,ys in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']]):
xMin=min(xMin,min(xs))
yMin=min(yMin,min(ys))
xMax=max(xMax,max(xs))
yMax=max(yMax,max(ys))
logger.debug("{:s}pWAYPXCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPXCors'],xMin,xMax))
logger.debug("{:s}pWAYPYCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPYCors'],yMin,yMax))
dx=xMax-xMin
dy=yMax-yMin
dxdy=dx/dy
dydx=1./dxdy
# i.d.R. "krumme" Grenzen (die Ticks werden von mpl i.d.R. trotzdem "glatt" ermittelt)
kwds['pAx'].set_xlim(xMin,xMax)
kwds['pAx'].set_ylim(yMin,yMax)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y-Achsen: Ticks ermitteln aber NICHT verändern -----------------------------------------------------------------
# auch bei "krummen" Grenzen setzt matplotlib i.d.R. "glatte" Ticks
# ----------------------------------------------------------------------------------------------------------------------------------------
# Ticks ermitteln
xTicks=kwds['pAx'].get_xticks()
yTicks=kwds['pAx'].get_yticks()
dxTick = xTicks[1]-xTicks[0]
xTickSpan=xTicks[-1]-xTicks[0]
dyTick = yTicks[1]-yTicks[0]
yTickSpan=yTicks[-1]-yTicks[0]
logger.debug("{:s}xTicks : {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
logger.debug("{:s}yTicks : {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
# dTick gleich setzen (deaktiviert)
if dyTick == dxTick:
pass # nichts zu tun
elif dyTick > dxTick:
# dyTick zu dxTick (kleinere) setzen
dTickW=dxTick
# erf. Anzahl
numOfTicksErf=math.floor(dy/dTickW)+1
newTicks=[idx*dTickW+yTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_yticks(newTicks)
#yTicks=kwds['pAx'].get_yticks()
#dyTick = yTicks[1]-yTicks[0]
#logger.debug("{:s}yTicks NEU: {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
else:
# dxTick zu dyTick (kleinere) setzen
dTickW=dyTick
# erf. Anzahl
numOfTicksErf=math.floor(dx/dTickW)+1
newTicks=[idx*dTickW+xTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_xticks(newTicks)
#xTicks=kwds['pAx'].get_xticks()
#dxTick = xTicks[1]-xTicks[0]
#logger.debug("{:s}xTicks NEU: {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
# ----------------------------------------------------------------------------------------------------------------------------------------
# Grid und Aspect
# ----------------------------------------------------------------------------------------------------------------------------------------
kwds['pAx'].grid()
kwds['pAx'].set_aspect(aspect='equal') # zur Sicherheit; andere als verzerrungsfreie Darstellungen machen im Netz kaum Sinn
kwds['pAx'].set_adjustable('box')
kwds['pAx'].set_anchor('SW')
## x,y-Seitenverhältnisse ermitteln ---------------------------------------------------------------------------
## total figure size
#figW, figH = kwds['pAx'].get_figure().get_size_inches()
## Axis pos. on figure
#x0, y0, w, h = kwds['pAx'].get_position().bounds
## Ratio of display units
#disp_ratio = (figH * h) / (figW * w)
#disp_ratioA = (figH) / (figW )
#disp_ratioB = (h) / (w)
## Ratio of data units
#data_ratio=kwds['pAx'].get_data_ratio()
#logger.debug("{:s}figW: {:6.2f} figH: {:6.2f}".format(logStr,figW,figH))
#logger.debug("{:s}x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
#logger.debug("{:s}pWAYPCors: Y/X: {:6.2f}".format(logStr,dydx))
#logger.debug("{:s}Ticks: Y/X: {:6.2f}".format(logStr,yTickSpan/xTickSpan))
#logger.debug("{:s}disp_ratio: {:6.2f} data_ratio: {:6.2f}".format(logStr,disp_ratio,data_ratio))
#logger.debug("{:s}disp_ratioA: {:6.2f} disp_ratioB: {:6.2f}".format(logStr,disp_ratioA,disp_ratioB))
# PIPE-Color: Farbskalamapping:
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['CBBinDiscrete'] and hasattr(cMap,'from_list'): # diskrete Farbskala aus kontinuierlicher erzeugen
N=kwds['CBBinTicks']-1
color_list = cMap(np.linspace(0, 1, N))
cmap_name = cMap.name + str(N)
kwds['pAttributeColorMap']=cMap.from_list(cmap_name, color_list, N)
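                # Worked example with assumed numbers: CBBinDiscrete=True and CBBinTicks=4 give
                # N=3, i.e. 3 equally spaced colors sampled from the continuous map - three
                # categories separated by the four colorbar ticks.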
minAttr=pDf[kwds['pAttribute']].min()
maxAttr=pDf[kwds['pAttribute']].max()
if kwds['pAttributeColorMapMin'] != None:
minLine=kwds['pAttributeColorMapMin']
else:
minLine=minAttr
if kwds['pAttributeColorMapMax'] != None:
maxLine=kwds['pAttributeColorMapMax']
else:
maxLine=maxAttr
logger.debug("{:s}Attribute: minLine (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine,minAttr))
logger.debug("{:s}Attribute: maxLine (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine,maxAttr))
# Norm
normLine=colors.Normalize(minLine,maxLine)
# kont. Farbskala truncated: Farbskala und Norm anpassen
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['pAttributeColorMapTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageStartLineColor=kwds['pAttributeColorMap'](normLine(usageStartLineValue))
logger.debug("{:s}pAttributeColorMapUsageStart: {:6.2f} ==> usageStartLineValue: {:8.2f} (minLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageStart']
,usageStartLineValue,minLine,str(usageStartLineColor)))
#
usageEndLineValue=maxLine-(1.-kwds['pAttributeColorMapUsageEnd'])*(maxLine-minLine)
usageEndLineColor=kwds['pAttributeColorMap'](normLine(usageEndLineValue))
logger.debug("{:s}pAttributeColorMapUsageEnd: {:6.2f} ==> usageEndLineValue: {:8.2f} (maxLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageEnd']
,usageEndLineValue,maxLine,str(usageEndLineColor)))
nColors=100
kwds['pAttributeColorMap'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMapUsageStart'], b=kwds['pAttributeColorMapUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMapUsageStart'],kwds['pAttributeColorMapUsageEnd'],nColors)))
normLine=colors.Normalize(max(minLine,usageStartLineValue),min(maxLine,usageEndLineValue))
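                # Worked example with assumed numbers: minLine=0, maxLine=1500,
                # pAttributeColorMapUsageStart=3/15 and pAttributeColorMapUsageEnd=12/15 yield
                # usageStartLineValue=300 and usageEndLineValue=1200; the colormap is rebuilt from
                # its inner [0.2, 0.8] range and normLine becomes colors.Normalize(300, 1200), so
                # Attribute values <=300 resp. >=1200 saturate at the ends of the truncated scale.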
# diskrete Farbskala mit individuellen Kategorien: Norm anpassen
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['CBBinBounds'] != None and not hasattr(cMap,'from_list'): # diskrete Farbskala liegt vor und Bounds sind vorgegeben
normLine = colors.BoundaryNorm(kwds['CBBinBounds'],cMap.N)
#CBPropExtend='both'
CBPropExtend='neither'
else:
CBPropExtend='neither'
# PIPE-Color 2nd: Farbskalamapping:
if kwds['pAttributeColorMap2ndMin'] != None:
minLine2nd=kwds['pAttributeColorMap2ndMin']
else:
minLine2nd=minAttr
if kwds['pAttributeColorMap2ndMax'] != None:
maxLine2nd=kwds['pAttributeColorMap2ndMax']
else:
maxLine2nd=maxAttr
logger.debug("{:s}Attribute: minLine2nd (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine2nd,minAttr))
logger.debug("{:s}Attribute: maxLine2nd (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine2nd,maxAttr))
# Norm
normLine2nd=colors.Normalize(minLine2nd,maxLine2nd)
# kont. Farbskala truncated: Farbskala anpassen
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap2nd'])
if kwds['pAttributeColorMap2ndTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue2nd=minLine2nd+kwds['pAttributeColorMap2ndUsageStart']*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageStart: {:8.2f} ==> usageStartLineValue2nd: {:8.2f} (minLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageStart'],usageStartLineValue2nd,minLine2nd))
usageStartLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageStartLineValue2nd))
#
usageEndLineValue2nd=maxLine2nd-(1.-kwds['pAttributeColorMap2ndUsageEnd'])*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageEnd: {:8.2f} ==> usageEndLineValue2nd: {:8.2f} (maxLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageEnd'],usageEndLineValue2nd,maxLine2nd))
usageEndLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageEndLineValue2nd))
nColors=100
kwds['pAttributeColorMap2nd'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMap2ndUsageStart'], b=kwds['pAttributeColorMap2ndUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMap2ndUsageStart'],kwds['pAttributeColorMap2ndUsageEnd'],nColors)))
# PIPE-Color 2nd: PLOT
pDfColorMap2nd=pDf[pDf.apply(kwds['pAttributeColorMap2ndFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows2nd,cols)=pDfColorMap2nd.shape
logger.debug("{:s}Color 2nd-PIPEs: {:d} von {:d}".format(logStr,rows2nd,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap2nd[kwds['pWAYPXCors']],pDfColorMap2nd[kwds['pWAYPYCors']],pDfColorMap2nd[kwds['pAttribute']],pDfColorMap2nd[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue2nd and vLine <= usageEndLineValue2nd:
# colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
#elif vLine > usageEndLineValue2nd:
# colorLine=usageEndLineColor2nd
#else:
# colorLine=usageStartLineColor2nd
colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: PLOT
pDfColorMap=pDf[pDf.apply(kwds['pAttributeColorMapFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows1st,cols)=pDfColorMap.shape
colorsCBValues=[]
logger.debug("{:s}Color 1st-PIPEs: {:d} von {:d}".format(logStr,rows1st,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']],pDfColorMap[kwds['pAttribute']],pDfColorMap[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue and vLine <= usageEndLineValue:
# colorLine=kwds['pAttributeColorMap'](normLine(vLine))
# value=vLine
#elif vLine > usageEndLineValue:
# colorLine=usageEndLineColor
# value=usageEndLineValue
#else:
# colorLine=usageStartLineColor
# value=usageStartLineValue
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
colorsCBValues.append(vLine)
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: PLOT der PIPE-Anfänge um Farbskala konstruieren zu koennen
xScatter=[]
yScatter=[]
for xs,ys in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']]):
xScatter.append(xs[0])
yScatter.append(ys[0])
s=kwds['pAttrLineSizeFactor']*pDfColorMap[kwds['pAttrLineSize']].apply(lambda x: math.fabs(x))
s=s.apply(lambda x: math.pow(x,2)) # https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
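            # Worked note: scatter sizes are marker areas in pts^2 whereas plot linewidths are in
            # pts, hence the squaring above; e.g. a pipe drawn 4 pts wide gets a 16 pts^2 start
            # marker, keeping the marker visually in step with the line it sits on.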
#pcN=kwds['pAx'].scatter(pDfColorMap['pXCor_i'],pDfColorMap['pYCor_i']
pcN=kwds['pAx'].scatter(xScatter,yScatter
,s=s
,linewidth=0 # the linewidth of the marker edges
# Farbskala
,cmap=kwds['pAttributeColorMap']
# Normierung Farbe
,norm=normLine
# Werte
,c=colorsCBValues
,edgecolors='none'
,clip_on=kwds['pClip']
)
# CB: Axes
divider = make_axes_locatable(kwds['pAx'])
cax = divider.append_axes('right',size="{:f}%".format(kwds['CBFraction']),pad=kwds['CBHpad'])
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
kwds['pAx'].set_aspect(1.) #!
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
# CB
cB=plt.gcf().colorbar(pcN, cax=cax, orientation='vertical',extend=CBPropExtend,spacing='proportional')
# Label
cB.set_label(kwds['CBLabel'])
# CB Ticks
if kwds['CBBinTicks'] != None:
cB.set_ticks(np.linspace(minLine,maxLine,kwds['CBBinTicks']))
ticks=cB.get_ticks()
try:
ticks=np.unique(np.append(ticks,[usageStartLineValue,usageEndLineValue]))
except:
pass
cB.set_ticks(ticks)
# CB Ticklabels
labels=cB.ax.get_yticklabels()
if kwds['pAttributeColorMapUsageStart'] > 0:
idx=np.where(ticks == usageStartLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" v=")
if kwds['pAttributeColorMapUsageEnd'] < 1:
idx=np.where(ticks == usageEndLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" ^=")
if kwds['pAttributeColorMapMax'] != None and maxLine<maxAttr:
labels[-1].set_text(labels[-1].get_text()+" >=")
if kwds['pAttributeColorMapMin'] != None and minLine>minAttr:
labels[0].set_text(labels[0].get_text()+" <=")
cB.ax.set_yticklabels(labels)
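            # Reading aid for the decorated tick labels (cf. the docstring above): ' v=' / ' ^='
            # mark the ticks where the truncated colormap starts / stops being used, while ' <=' /
            # ' >=' mark the outer ticks when pAttributeColorMapMin / pAttributeColorMapMax clamp
            # data lying outside the forced color range onto the end colors.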
# customized yTicks --------------------
if kwds['CBTicks'] != None:
cB.set_ticks(kwds['CBTicks'])
if kwds['CBTickLabels'] != None:
labels=cB.ax.get_yticklabels()
if len(labels)==len(kwds['CBTickLabels']):
for label,labelNew in zip(labels,kwds['CBTickLabels']):
label.set_text(labelNew)
cB.ax.set_yticklabels(labels)
else:
logStrFinal="{:s}Error: Anz. CB Ticklabels Ist: {:d} != Anz. Ticklabeles Soll: {:d} ?!".format(logStr,len(labels),len(kwds['CBTickLabels']))
logger.error(logStrFinal)
raise RmError(logStrFinal)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
@classmethod
def pltHP(cls,pDf,**kwds):
"""
Plots a Hydraulic Profile.
Args:
DATA:
pDf: dataFrame
defining the HPLINES (xy-curves) Identification:
the different HPs in pDf are identified by the two cols
NAMECol: default: 'NAME'; set to None if NAMECol is not criteria for Identification ...
and
LayerCol: default: 'Layer'; set to None if LayerCol is not criteria for Identification ...
for each HP several lines (xy-curves) are plotted
... not criteria ...
if NAMECol is None only LayerCol is used
if LayerCol also is None, all rows are treated as "the" HPLINE
defining the HPLINES (xy-curves) Geometry:
* xCol: col in pDf for x; example: 'x'
the col is the same for all HPs and all y
                    * edgeColSequence: cols to be used for start-node, end-node, next-node; default: ['NAME_i','NAME_k','nextNODE']
* 'NAME'_'Layer' (i.e. Nord-Süd_1) NAMECol_LayerCol is used as an Index in hpLineGeoms
* hpLineGeoms - Example - = {
'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
}
- masterHP: Bezugs-Schnitt
- masterNode: muss es in masterHP geben
- masterNode: muss es auch im Schnitt geben bei matchType='matches'; bei 'starts' wird der Anfang gemapped; bei 'ends' das Ende
defining the HPLINES (xy-curves) y-Achsentypen (y-Axes):
* hpLines: list of cols in pDf for y; example: ['P']
each col in hpLines defines a hpLine (a xy-curve) to be plotted
for each identified HP all defined hpLines are plotted
defining the HPLINES (xy-curves) Layout:
# 'NAME'_'Layer'_'hpLineType' (i.e. Nord-Süd_1_P) is used as an Index in hpLineProps
* hpLineProps - Example - = {
'Nord-Süd_1_P':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
,'Nord-Süd_2_P':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
}
if 'NAME'_'Layer'_'hpLine' not in hpLineProps:
default props are used
if hpLineProps['NAME'_'Layer'_'hpLine'] == None:
HPLINE is not plotted
y-Achsentypen (y-Axes):
* werden ermittelt aus hpLines
* der Spaltenname - z.B. 'P' - wird dabei als Bezeichner für den Achsentyp benutzt
* die Achsen werden erstellt in der Reihenfolge in der sie in hpLines auftreten
* Bezeichner wie 'P','P_1',... werden dabei als vom selben Achsentyp 'P' (selbe y-Achse also) gewertet
* P_1, P_2, ... können z.B. P zu verschiedenen Zeiten sein oder Aggregate über die Zeit wie Min/Max
* yAxesDetectionPattern: regExp mit welcher die Achsentypen ermittelt werden; default: '([\w ]+)(_)(\d+)$'
* yTwinedAxesPosDeltaHPStart: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche; default: -0.0125
* yTwinedAxesPosDeltaHP: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche; default: -0.05
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
Return:
yAxes: dct with AXES; key=y-Achsentypen
yLines: dct with Line2Ds; key=Index from hpLineProps
xNodeInfs: dct with NodeInformation; key=Index also used in i.e. hpLineGeoms
key: NAMECol_LayerCol
value: dct
key: node
value: dct
kwds['xCol']: x in HP
kwds['xCol']+'Plot': x in HP-Plot
pDfIdx: Index in pDf
>>> # -q -m 0 -s pltHP -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> xm.MxAdd(mx=mx,aggReq=['TIME','TMIN','TMAX'],timeReq=3*[mx.df.index[0]],timeReq2nd=3*[mx.df.index[-1]],viewList=['vAGSN'],ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH','PH_1','PH_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg','bBzg_1','bBzg_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN,pAx=axNfd
... ,hpLines=['bBzg','bBzg_1','bBzg_2','Q']
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg_1':{'label':'RL min','color':'blue','linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_2':{'label':'VL max','color':'red' ,'linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_1':None
... ,'AGFW Symposium DH_2_bBzg_2':None
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... }
... )
>>> yAxes.keys()
dict_keys(['bBzg', 'Q'])
>>> yLines.keys()
dict_keys(['AGFW Symposium DH_1_bBzg', 'AGFW Symposium DH_1_bBzg_2', 'AGFW Symposium DH_1_Q', 'AGFW Symposium DH_2_bBzg', 'AGFW Symposium DH_2_bBzg_1', 'AGFW Symposium DH_2_Q'])
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> ###
>>> Rcuts=[
... {'NAME':'R-Abzweig','nl':['R-3107','R-3427']}
... ,{'NAME':'R-EndsTest','nl':['R-HWSU','R-HKW3S']}
... ,{'NAME':'R-MatchesTest','nl':['R-HKW1','R-2104']}
... ]
>>> Vcuts=[
... {'NAME':'V-Abzweig','nl':['V-3107','V-3427']}
... ,{'NAME':'V-EndsTest','nl':['V-HWSU','V-HKW3S']}
... ,{'NAME':'V-MatchesTest','nl':['V-HKW1','V-2104']}
... ]
>>> fV=lambda row: True if row.KVR_i=='1' and row.KVR_k=='1' else False
>>> fR=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
>>> for vcut,rcut in zip(Vcuts,Rcuts):
... ret=xm.vAGSN_Add(nl=vcut['nl'],weight='L',Layer=1,AKTIV=None,NAME=vcut['NAME'],fmask=fV)
... ret=xm.vAGSN_Add(nl=rcut['nl'],weight='L',Layer=2,AKTIV=None,NAME=rcut['NAME'],fmask=fR)
>>> # Schnitte erneut mit Ergebnissen versorgen, da Schnitte neu definiert wurden
>>> xm.MxAdd(mx=mx,ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH'],['P'],['RHO'],['Z']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg'],['P'],['RHO'],['Z']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN[vAGSN['NAME'].isin(['R-Abzweig','V-Abzweig','AGFW Symposium DH','R-EndsTest','V-EndsTest','R-MatchesTest','V-MatchesTest'])],pAx=axNfd
... ,hpLines=['bBzg','Q']
... ,hpLineGeoms={
... 'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
... ,'R-Abzweig_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-3107','matchType':'starts'}
... ,'V-EndsTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-HKW3S','matchType':'ends'}
... ,'R-EndsTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-HKW3S','matchType':'ends'}
... ,'V-MatchesTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-1312','matchType':'matches','offset':-500}
... ,'R-MatchesTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-1312','matchType':'matches'}
... }
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... ,'V-Abzweig_1_bBzg':{'label':'VL','color':'tomato' ,'linestyle':'-','linewidth':3}
... ,'R-Abzweig_2_bBzg':{'label':'RL','color':'plum' ,'linestyle':'-','linewidth':3}
... ,'V-Abzweig_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-Abzweig_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... ,'V-EndsTest_1_bBzg':{'label':'VL','color':'lightcoral' ,'linestyle':'-','linewidth':3}
... ,'R-EndsTest_2_bBzg':{'label':'RL','color':'aquamarine' ,'linestyle':'-','linewidth':3}
... ,'V-EndsTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-EndsTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... #,'V-MatchesTest_1_bBzg':{'label':'VL','color':'orange' ,'linestyle':'-','linewidth':1}
... ,'R-MatchesTest_2_bBzg':{'label':'RL','color':'slateblue' ,'linestyle':'-','linewidth':1}
... ,'V-MatchesTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-MatchesTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... }
... )
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> sorted(xNodeInfs.keys())
['AGFW Symposium DH_1', 'AGFW Symposium DH_2', 'R-Abzweig_2', 'R-EndsTest_2', 'R-MatchesTest_2', 'V-Abzweig_1', 'V-EndsTest_1', 'V-MatchesTest_1']
>>> xNodeInf=xNodeInfs['R-Abzweig_2']
>>> nl=Rcuts[0]['nl']
>>> nodeInfS=xNodeInf[nl[0]]
>>> nodeInfE=xNodeInf[nl[-1]]
>>> sorted(nodeInfS.keys())
['pDfIdx', 'x', 'xPlot']
>>> dxPlot=nodeInfE['xPlot']-nodeInfS['xPlot']
>>> dxHP=nodeInfE['x']-nodeInfS['x']
>>> dxPlot==dxHP
True
>>> nodeInfE['x']=round(nodeInfE['x'],3)
>>> nodeInfE['xPlot']=round(nodeInfE['xPlot'],3)
>>> {key:value for key,value in nodeInfE.items() if key not in ['pDfIdx']}
{'x': 3285.0, 'xPlot': 20312.428}
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'NAMECol' not in keys:
kwds['NAMECol']='NAME'
if 'LayerCol' not in keys:
kwds['LayerCol']='Layer'
if 'xCol' not in keys:
kwds['xCol']='x'
if 'hpLines' not in keys:
kwds['hpLines']=['P']
if 'hpLineProps' not in keys:
kwds['hpLineProps']={'NAME_1_P':{'label':'HP NAME Layer 1 P','color':'red','linestyle':'-','linewidth':3}}
if 'hpLineGeoms' not in keys:
kwds['hpLineGeoms']=None
if 'edgeColSequence' not in keys:
kwds['edgeColSequence']=['NAME_i','NAME_k','nextNODE']
if 'yTwinedAxesPosDeltaHPStart' not in keys:
                # (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
                # (usually negative) additional offset of each further y-axis from the plot area; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
if 'yAxesDetectionPattern' not in keys:
                # regular expression used to determine the axis types
kwds['yAxesDetectionPattern']='([\w ]+)(_)(\d+)$'
logger.debug("{:s}xCol: {:s}.".format(logStr,kwds['xCol']))
logger.debug("{:s}hpLines: {:s}.".format(logStr,str(kwds['hpLines'])))
logger.debug("{:s}hpLineProps: {:s}.".format(logStr,str(kwds['hpLineProps'])))
logger.debug("{:s}hpLineGeoms: {:s}.".format(logStr,str(kwds['hpLineGeoms'])))
logger.debug("{:s}edgeColSequence: {:s}.".format(logStr,str(kwds['edgeColSequence'])))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
logger.debug("{:s}yAxesDetectionPattern: {:s}.".format(logStr,str(kwds['yAxesDetectionPattern'])))
            # determine cuts and layers
if kwds['NAMECol'] != None and kwds['LayerCol'] != None:
hPs=pDf[[kwds['NAMECol'],kwds['LayerCol']]].drop_duplicates()
elif kwds['NAMECol'] != None:
hPs=pDf[[kwds['NAMECol']]].drop_duplicates()
hPs['Layer']=None
elif kwds['LayerCol'] != None:
hPs=pDf[[kwds['LayerCol']]].drop_duplicates()
hPs['NAME']=None
hPs=hPs[['NAME','Layer']]
else:
hPs=pd.DataFrame(data={'NAME':[None],'Layer':[None]})
#logger.debug("{:s}hPs: {:s}.".format(logStr,hPs.to_string()))
            # hPs has 2 columns: NAME and Layer
            # determine the y-axis types
hpLineTypesSequence=[col if re.search(kwds['yAxesDetectionPattern'],col)==None else re.search(kwds['yAxesDetectionPattern'],col).group(1) for col in kwds['hpLines']]
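            # e.g. hpLines=['bBzg','Q'] yields the axis types ['bBzg','Q']; a column named like 'PH_1' would be mapped to axis type 'PH' by the pattern above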
            # construct the y-axes
yAxes={}
colType1st=hpLineTypesSequence[0]
axHP=kwds['pAx']
axHP.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axHP.set_ylabel(colType1st)
yAxes[colType1st]=axHP
logger.debug("{:s}colType: {:s} is attached to Axes pcAx .".format(logStr,colType1st))
for idx,colType in enumerate(hpLineTypesSequence[1:]):
if colType not in yAxes:
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: new Axes_ yPos: {:1.4f} ...".format(logStr,colType,yPos))
                    # additional y-axis
axHP = axHP.twinx()
axHP.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axHP)
axHP.spines['left'].set_visible(True)
axHP.yaxis.set_label_position('left')
axHP.yaxis.set_ticks_position('left')
axHP.set_ylabel(colType)
yAxes[colType]=axHP
yLines={}
xNodeInfs={}
for index,row in hPs.iterrows():
                # iterate over all cuts (NAME) and layers (Layer)
def getKeyBaseAndDf(dfSource,col1Name,col2Name,col1Value,col2Value):
#logger.debug("{:s}getKeyBaseAndDf: dfSource: {:s} ...".format(logStr,dfSource[[col1Name,col2Name,'nextNODE']].to_string()))
                    # filter dfSource by the given columns
if col1Name != None and col2Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
&
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(row[col1Name])+'_'+str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} Layer: {!s:s} ...".format(logStr,col1Value,col2Value))
elif col1Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
]
keyBase=str(col1Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} ...".format(logStr,col1Value))
elif col2Name != None:
dfFiltered=dfSource[
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Layer: {!s:s} ...".format(logStr,col2Value))
else:
dfFiltered=dfSource
keyBase=''
#logger.debug("{:s}getKeyBaseAndDf: dfFiltered: {:s} ...".format(logStr,dfFiltered[[col1Name,col2Name,'nextNODE']].to_string()))
return keyBase, dfFiltered
                # filter the current cut+layer from pDf into hPpDf
keyBase,hPpDf=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,row[kwds['NAMECol']] # Spaltenwert 1
,row[kwds['LayerCol']] # Spaltenwert 2
)
if hPpDf.empty:
logger.info("{:s}Schnitt: {!s:s} Layer: {!s:s}: NICHT in pDf ?! ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']]))
continue
xOffset=0
xOffsetStatic=0
xFactorStatic=1
if kwds['hpLineGeoms'] != None:
if keyBase.rstrip('_') in kwds['hpLineGeoms'].keys():
hpLineGeom=kwds['hpLineGeoms'][keyBase.rstrip('_')]
logger.debug("{:s}Line: {:s}: hpLineGeom: {:s} ...".format(logStr,keyBase.rstrip('_'),str(hpLineGeom)))
if 'offset' in hpLineGeom.keys():
xOffsetStatic=hpLineGeom['offset']
if 'factor' in hpLineGeom.keys():
xFactorStatic=hpLineGeom['factor']
if 'masterHP' in hpLineGeom.keys():
masterHP=hpLineGeom['masterHP']
name=masterHP.split('_')[0]
layer=masterHP.replace(name,'')
layer=layer.replace('_','')
keyBaseMaster,hPpDfMaster=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,name # Spaltenwert 1
,layer # Spaltenwert 2
)
if 'masterNode' in hpLineGeom.keys():
masterNode=hpLineGeom['masterNode']
def fGetMatchingRows(row,cols,matchNode):
for col in cols:
if row[col]==matchNode:
return True
return False
                            # determine the columns used to search for the anchor x ...
if 'matchAnchorCols' in hpLineGeom.keys():
matchAnchorCols=hpLineGeom['matchAnchorCols']
else:
matchAnchorCols=[kwds['edgeColSequence'][2]]
                            # anchor node: candidate rows ...
hPpDfMatched=hPpDf[hPpDf.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
hPpDfMasterMatched=hPpDfMaster[hPpDfMaster.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
if 'matchType' in hpLineGeom.keys():
matchType=hpLineGeom['matchType']
else:
matchType='starts'
                            # search for the anchor x in the master -------------------------
if 'matchAnchor' in hpLineGeom.keys():
matchAnchor=hpLineGeom['matchAnchor']
else:
matchAnchor='max'
if hPpDfMasterMatched.empty:
logger.info("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Master in den cols {!s:s} NICHT gefunden. Loesung: xMasterOffset=0.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols))
xMasterOffset=0
else:
if matchAnchor=='min':
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchor=='max'
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xMasterOffset=hPpDfMasterMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xMasterOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xMasterOffset))
                            # search for the anchor x in the HP itself --------------------------
if 'matchAnchorChild' in hpLineGeom.keys():
matchAnchorChild=hpLineGeom['matchAnchorChild']
else:
matchAnchorChild='max'
if hPpDfMatched.empty:
logStrTmp="{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Child in den cols {!s:s} NICHT gefunden.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols)
if matchType=='matches':
logger.info(logStrTmp+' Loesung: xChildOffset=0.')
else:
if matchType=='ends':
logger.debug(logStrTmp+' Child endet nicht mit masterNode. xChildOffset=0')
else:
logger.debug(logStrTmp+' Child startet evtl. mit masterNode. xChildOffset=0')
xChildOffset=0
else:
if matchAnchorChild=='min':
hPpDfMatched=hPpDf.loc[hPpDfMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchorChild=='max'
hPpDfMatched=hPpDf.loc[hPpDfMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xChildOffset=hPpDfMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xChildOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xChildOffset))
                            # compute xOffset
if matchType=='starts':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].min() # der Beginn
                                # matchNode is the start node
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
elif matchType=='ends':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].max() # das Ende
                                # matchNode is the end node
if hPpDf[kwds['edgeColSequence'][2]].iloc[-1] == hPpDf[kwds['edgeColSequence'][1]].iloc[-1]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[-1]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[-1]
else: # 'matches'
                                # per node
matchNode=masterNode
xOffset=xMasterOffset-xChildOffset
                        # xOffset has been computed
                        # masterNode and matchNode are known
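                        # summary of the shift: 'starts' aligns the child's first x with the master anchor,
                        # 'ends' aligns the child's last x with it, and 'matches' aligns the matched node itself
                        # (e.g. xMasterOffset=1200 and xChildOffset=200 give xOffset=1000 for 'matches')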
logger.debug("{:s}hPpDfMatched: {:s} ...".format(logStr,hPpDfMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
logger.debug("{:s}hPpDfMasterMatched: {:s} ...".format(logStr,hPpDfMasterMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
else:
logger.debug("{:s}Line: {:s}: keine Geometrieeigenschaften definiert.".format(logStr,keyBase.rstrip('_')))
                # determine xNodeInfs
nodeList=hPpDf[kwds['edgeColSequence'][2]].copy()
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
                    # 1st node is i
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
                    # 1st node is k
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
nodeList=nodeList.unique()
xNodeInf={}
for idx,node in enumerate(nodeList):
nodeInf={}
if idx==0:
nodeInf[kwds['xCol']]=0
nodeInf['pDfIdx']=hPpDf.index.values[0]
else:
nodeInf[kwds['xCol']]=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].max()
nodeInf['pDfIdx']=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].idxmax()
nodeInf[kwds['xCol']+'Plot']=nodeInf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic
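                    # plot coordinate of a node: xPlot = x*xFactorStatic + xOffset + xOffsetStatic
                    # (e.g. x=3285.0 with factor 1 and a total offset of 17027.428 gives xPlot=20312.428, cf. the doctest above)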
xNodeInf[node]=nodeInf
xNodeInfs[keyBase.rstrip('_')]=xNodeInf
                # iterate over all columns (i.e. the y-values to be plotted)
for idx,hpLine in enumerate(kwds['hpLines']):
key=keyBase+hpLine
logger.debug("{:s}Line: {:s} ...".format(logStr,key))
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
if hpLineProp == None:
logger.debug("{:s}Line: {:s} ...: kein Plot.".format(logStr,key))
continue # kein Plot
label=key
color='black'
linestyle='-'
linewidth=3
hpLineType=hpLineTypesSequence[idx]
axHP=yAxes[hpLineType]
lines=axHP.plot(hPpDf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic,hPpDf[hpLine],label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[label]=lines[0]
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
logger.debug("{:s}Line: {:s}: hpLineProp: {:s}.".format(logStr,key,str(hpLineProp)))
for prop,value in hpLineProp.items():
plt.setp(yLines[label],"{:s}".format(prop),value)
else:
logger.debug("{:s}Line: {:s}: keine Eigenschaften definiert.".format(logStr,key))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,xNodeInfs
@classmethod
def pltTC(cls,pDf,tcLines,**kwds):
"""
Plots a Time Curve Diagram.
Args:
DATA:
pDf: dataFrame
index: times
cols: values (with mx.df colnames)
tcLines: dct
defining the Curves and their Layout:
Key:
                        OBJTYPE~NAME1~NAME2~ATTRTYPE is used as a key, i.e. OBJTYPE_PK is not part of the key
* tcLines - Example - = {
'KNOT~NAME1~~PH':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
}
                    Definition of the y-axis types (y-Axes):
                        * determined from the distinct ATTRTYPEs in tcLines
                        * ATTRTYPE - e.g. 'PH' - is used as the identifier of the axis type
                        * the axes are created in the order in which they occur in tcLines
                        * yTwinedAxesPosDeltaHPStart: (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
                        * yTwinedAxesPosDeltaHP: (usually negative) additional offset of each further y-axis from the plot area; default: -0.05
                    Attributes:
                        * all valid (Line2D) properties
* +
* forceYType
* offset
* factor
* timeStart
* timeEnd
* legendInfosFmt
* label
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
                x-axis formatting:
                    majLocator - examples:
                        mdates.MinuteLocator(interval=5)
                        mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
                    majFormatter - examples:
                        mdates.DateFormatter('%d.%m.%y: %H:%M')
                    xTicksLabelsOff: if True, no x-axis tick labels are drawn
Return:
                yAxes: dct with AXES; key=y-axis type
yLines: dct with Line2Ds; key=Index from tcLines
vLines: dct with Line2Ds; key=Index from vLines
yLinesLegendLabels: dct with Legendlabels; key=Index from tcLines
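            A minimal call sketch (illustrative only; assumes df is a time-indexed DataFrame whose
            column names are full SIR 3S IDs and ax is an existing matplotlib Axes):
                yAxes,yLines,vLines,yLabels=Rm.pltTC(df
                    ,tcLines={'KNOT~NAME1~~PH':{'label':'PH at NAME1','color':'red'}}
                    ,pAx=ax)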
>>> # -q -m 0 -s pltTC -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import matplotlib.dates as mdates
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> # xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> sir3sID=mx.getSir3sIDFromSir3sIDoPK('ALLG~~~LINEPACKGEOM') # 'ALLG~~~5151766074450398225~LINEPACKGEOM'
>>> # mx.df[sir3sID].describe()
>>> # mx.df[sir3sID].iloc[0]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axTC = fig.add_subplot(gs[0])
>>> yAxes,yLines,vLines,yLinesLegendLabels=Rm.Rm.pltTC(mx.df
... ,tcLines={
... 'ALLG~~~LINEPACKRATE':{'label':'Linepackrate','color':'red' ,'linestyle':'-','linewidth':3,'drawstyle':'steps','factor':10}
... ,'ALLG~~~LINEPACKGEOM':{'label':'Linepackgeometrie','color':'b' ,'linestyle':'-','linewidth':3,'offset':-mx.df[sir3sID].iloc[0]
... ,'timeStart':mx.df.index[0]+pd.Timedelta('10 Minutes')
... ,'timeEnd':mx.df.index[-1]-pd.Timedelta('10 Minutes')}
... ,'RSLW~wNA~~XA':{'label':'RSLW~wNA~~XA','color':'lime','forceYType':'N'}
... ,'PUMP~R-A-SS~R-A-DS~N':{'label':'PUMP~R-A-SS~R-A-DS~N','color':'aquamarine','linestyle':'--','legendInfosFmt':'{:4.0f}'}
... }
... ,pAx=axTC
... ,vLines={
... 'a vLine Label':{'time': mx.df.index[0] + pd.Timedelta('10 Minutes')
... ,'color':'dimgrey'
... ,'linestyle':'--'
... ,'linewidth':5.}
... }
... ,majLocator=mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
... ,majFormatter=mdates.DateFormatter('%d.%m.%y: %H:%M')
... #,xTicksLabelsOff=True
... )
>>> sorted(yAxes.keys())
['LINEPACKGEOM', 'LINEPACKRATE', 'N']
>>> sorted(yLines.keys())
['ALLG~~~LINEPACKGEOM', 'ALLG~~~LINEPACKRATE', 'PUMP~R-A-SS~R-A-DS~N', 'RSLW~wNA~~XA']
>>> sorted(vLines.keys())
['a vLine Label']
>>> gs.tight_layout(fig)
>>> plt.show()
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'yTwinedAxesPosDeltaHPStart' not in keys:
                # (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
                # (usually negative) additional offset of each further y-axis from the plot area; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
logger.debug("{:s}tcLines: {:s}.".format(logStr,str(tcLines)))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
if 'lLoc' not in keys:
kwds['lLoc']='best'
if 'lFramealpha' not in keys:
kwds['lFramealpha']=matplotlib.rcParams["legend.framealpha"]
if 'lFacecolor' not in keys:
kwds['lFacecolor']='white'
if 'lOff' not in keys:
kwds['lOff']=False
yAxes=yLines=vLines=None
            # for each column determine the key without OBJTYPE_PK == the key used in tcLines
colFromTcKey={}
for col in pDf.columns.tolist():
if pd.isna(col):
continue
try:
colNew=Mx.getSir3sIDoPKFromSir3sID(col)
colFromTcKey[colNew]=col # merken welche Originalspalte zu dem tcLines Schluessel gehoert
logger.debug("{:s}Zu Spalte ohne Schlüssel: {:s} gehört Spalte: {:s} in pDf.".format(logStr,colNew,col))
except:
logger.debug("{:s}keine Zuordnung gefunden (z.B. kein Mx.getSir3sIDoPKFromSir3sID-match) fuer pDf-Spalte: {:s}. Spaltenname(n) keine vollständigen SIR 3S Schluessel (mehr)?!".format(logStr,col))
            # determine the y-axis types
yTypesSequence=[]
for key,props in tcLines.items():
try:
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
if yType not in yTypesSequence:
yTypesSequence.append(yType)
logger.debug("{:s}neuer y-Achsentyp: {:s}.".format(logStr,yType))
except:
logger.debug("{:s}kein Achsentyp ermittelt (z.B. kein Mx.reSir3sIDoPKcompiled-match) fuer: {:s}. tcLine(s) Schluessel kein SIR 3S Schluessel oPK?!".format(logStr,key))
            # construct the y-axes
yAxes={}
colType1st=yTypesSequence[0]
axTC=kwds['pAx']
axTC.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axTC.set_ylabel(colType1st)
yAxes[colType1st]=axTC
logger.debug("{:s}colType: {:s}: is attached to 1st Axes.".format(logStr,colType1st))
for idx,colType in enumerate(yTypesSequence[1:]):
                # additional y-axis
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: is attached to a new Axes: yPos: {:1.4f} ...".format(logStr,colType,yPos))
axTC = axTC.twinx()
axTC.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axTC)
axTC.spines['left'].set_visible(True)
axTC.yaxis.set_label_position('left')
axTC.yaxis.set_ticks_position('left')
axTC.set_ylabel(colType)
yAxes[colType]=axTC
            # iterate over all defined curves
            # determine the max. label length ahead of the legend infos
labels=[]
infos=[]
for key,props in tcLines.items():
label=key
if 'label' in props:
label=props['label']
labels.append(label)
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
logger.debug("{:s}Line: {:s}: Spalte in pDf: {:s}.".format(logStr,key,col))
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
plotDf=pDf.loc[timeStart:timeEnd,:]
infos.append(legendInfosFmt.format(plotDf[col].min()))
infos.append(legendInfosFmt.format(plotDf[col].max()))
labelsLength=[len(label) for label in labels]
labelsLengthMax=max(labelsLength)
infosLength=[len(info) for info in infos]
infosLengthMax=max(infosLength)
            # draw
yLines={}
yLinesLegendLabels={}
            # iterate over all defined curves
for key,props in tcLines.items():
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
axTC=yAxes[yType]
logger.debug("{:s}Line: {:s} on Axes {:s} ...".format(logStr,key,yType))
label=key
color='black'
linestyle='-'
linewidth=3
if 'offset' in props:
offset=props['offset']
else:
offset=0.
if 'factor' in props:
factor=props['factor']
else:
factor=1.
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
plotDf=pDf.loc[timeStart:timeEnd,:]
lines=axTC.plot(plotDf.index.values,plotDf[col]*factor+offset,label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[key]=lines[0]
if 'label' in props:
label=props['label']
else:
label=label
legendLabelFormat="Anf.: {:s} Ende: {:s} Min: {:s} Max: {:s}"#.format(*4*[legendInfosFmt])
legendLabelFormat="{:s} "+legendLabelFormat
legendInfos=[plotDf[col].iloc[0],plotDf[col].iloc[-1],plotDf[col].min(),plotDf[col].max()]
legendInfos=[factor*legendInfo+offset for legendInfo in legendInfos]
legendLabel=legendLabelFormat.format(label.ljust(labelsLengthMax,' '),
*["{:s}".format(legendInfosFmt).format(legendInfo).rjust(infosLengthMax,' ') for legendInfo in legendInfos]
)
yLinesLegendLabels[key]=legendLabel
logger.debug("{:s}legendLabel: {:s}.".format(logStr,legendLabel))
for prop,value in props.items():
if prop not in ['forceYType','offset','factor','timeStart','timeEnd','legendInfosFmt']:
plt.setp(yLines[key],"{:s}".format(prop),value)
            # x-axis
            # iterate over all axes
for key,ax in yAxes.items():
ax.set_xlim(pDf.index[0],pDf.index[-1])
if 'majLocator' in kwds.keys():
ax.xaxis.set_major_locator(kwds['majLocator'])
if 'majFormatter' in kwds.keys():
ax.xaxis.set_major_formatter(kwds['majFormatter'])
plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.xaxis.grid()
                # switch off the tick labelling
if 'xTicksLabelsOff' in kwds.keys(): # xTicksOff
if kwds['xTicksLabelsOff']:
logger.debug("{:s}Achse: {:s}: x-Achse Labels aus.".format(logStr,key))
#for tic in ax.xaxis.get_major_ticks():
# tic.tick1On = tic.tick2On = False
ax.set_xticklabels([])
# vLines
            # iterate over all defined vLines
vLines={}
if 'vLines' in kwds.keys():
for key,props in kwds['vLines'].items():
if 'time' in props.keys():
logger.debug("{:s}vLine: {:s} ....".format(logStr,key))
vLine=ax.axvline(x=props['time'], ymin=0, ymax=1, label=key)
vLines[key]=vLine
for prop,value in props.items():
if prop not in ['time']:
plt.setp(vLine,"{:s}".format(prop),value)
else:
logger.debug("{:s}vLine: {:s}: time nicht definiert.".format(logStr,key))
# Legend
import matplotlib.font_manager as font_manager
font = font_manager.FontProperties(family='monospace'
#weight='bold',
#style='normal',
#size=16
)
if not kwds['lOff']:
l=kwds['pAx'].legend(
tuple([yLines[yline] for yline in yLines])
,
tuple([yLinesLegendLabels[yLine] for yLine in yLinesLegendLabels])
,loc=kwds['lLoc']
,framealpha=kwds['lFramealpha']
,facecolor=kwds['lFacecolor']
,prop=font
)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,vLines,yLinesLegendLabels
def __init__(self,xm=None,mx=None):
"""
Args:
xm: Xm.Xm Object
mx: Mx.Mx Object
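            A minimal usage sketch (assuming xm and mx are already constructed Xm.Xm / Mx.Mx objects):
                rm=Rm(xm=xm,mx=mx)
                pFWVB=rm.pltNetDHUS()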
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
self.xm=xm
self.mx=mx
try:
                vNRCV_Mx1=self.xm.dataFrames['vNRCV_Mx1'] # i.e. master data already annotated with MX1 knowledge
except:
logger.debug("{:s}{:s} not in {:s}. Sachdaten mit MX1-Wissen zu annotieren wird nachgeholt ...".format(logStr,'vNRCV_Mx1','dataFrames'))
self.xm.MxSync(mx=self.mx)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetDHUS(self,**kwds):
"""Plot: Net: DistrictHeatingUnderSupply.
Args (optional):
TIMEs (als TIMEDELTA zu Szenariumbeginn):
* timeDeltaToRef: Reference Scenariotime (for MeasureInRefPerc-Calculations) (default: pd.to_timedelta('0 seconds'))
* timeDeltaToT: Scenariotime (default: pd.to_timedelta('0 seconds'))
FWVB
* pFWVBFilterFunction: Filterfunction to be applied to FWVB to determine the FWVB to be plotted
* default: lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK>0)
                    * CONT_IDisIn: [1001]
                        * to avoid drawing FWVB that belong to blocks (unlikely that such FWVB exist)
                    * W0LFK>0:
                        * to avoid trying to draw FWVB with a nominal load of 0 (pFWVBAttribute default is 'W0LFK')
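                    A different filter is just another callable over the (converted) vFWVB frame; illustrative
                    sketch (the 50 kW threshold is an assumption) restricting the plot to larger consumers:
                        pFWVBFilterFunction=lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK>50)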
FWVB Attribute (Size, z-Order) - from vFWVB
* pFWVBAttribute: columnName (default: 'W0LFK')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pFWVBAttributeApplyFunction: Function to be applied to column pFWVBAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pFWVBAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pFWVBAttributeApplyFunction if any
* default: 0
                    * .fillna(pFWVBAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
                * pFWVBAttributeAsc: z-order (default: False, i.e. small ones are drawn on top of large ones)
* pFWVBAttributeRefSize: scatter Sy-Area in pts^2 of for RefSizeValue (default: 10**2)
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
FWVB (plot only large (small, medium) FWVB ...)
* quantil_pFWVBAttributeHigh <= (default: 1.)
* quantil_pFWVBAttributeLow >= (default: .0)
* default: all FWVB are plotted
* note that Attribute >0 is a precondition
FWVB Measure (Color) - from mx
* pFWVBMeasure (default: 'FWVB~*~*~*~W')
* float() must be possible
                * pFWVBMeasureInRefPerc (default: True, i.e. Measure is processed as the percentage of T relative to Ref)
* 0-1
* if refValue is 0 than refPerc-Result is set to 1
* pFWVBMeasureAlpha/Colormap/Clip
* 3Classes
* pFWVBMeasure3Classes (default: False)
* False:
                        * Measure is not displayed in 3 classes
                        * MCategory is nevertheless assigned according to FixedLimitsHigh/Low
                * CatTexts (used when 3Classes is set to True)
                    * for the CBLegend (3Classes) as additional labelling on the right
                    * as texts for the MCategory column in the returned pFWVB
* pMCatTopText
* pMCatMidText
* pMCatBotText
                * CatAttribs (used when 3Classes is set to True)
                    * for the node representation
* pMCatTopAlpha/Color/Clip
* pMCatMidAlpha/Colormap/Clip
* pMCatBotAlpha/Color/Clip
* CBFixedLimits
                    * pFWVBMeasureCBFixedLimits (default: False, i.e. color scale follows the existing min./max. value)
                        * is forced to True when 3Classes is set to True
                        * so that the middle color scale "obeys" the class limits
* pFWVBMeasureCBFixedLimitLow (default: .10)
* pFWVBMeasureCBFixedLimitHigh (default: .95)
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: .3)
                * CBAnchorHorizontal: horizontal anchor point of the colorbar in plot % (default: 0.)
                * CBAnchorVertical: vertical anchor point of the colorbar in plot % (default: 0.2)
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
ROHR
* pROHRFilterFunction: Filterfunction to be applied to PIPEs to determine the PIPEs to be plotted
* default: lambda df: (df.KVR.astype(int).isin([2])) & (df.CONT_ID.astype(int).isin([1001])) & (df.DI.astype(float)>0)
* KVRisIn: [2]
* 1: supply-line
* 2: return-line
                    * CONT_IDisIn: [1001]
                        * to avoid drawing pipes that belong to blocks (their coordinates do not match the coordinates of pipes from the view block)
                    * DI>0:
                        * to avoid trying to draw pipes with an inner diameter of 0 (pROHRAttribute default is 'DI')
ROHR (PIPE-Line: Size and Color, z-Order) - from vROHR
* pROHRAttribute: columnName (default: 'DI')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pROHRAttributeApplyFunction: Function to be applied to column pROHRAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pROHRAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pROHRAttributeApplyFunction if any
* default: 0
                    * .fillna(pROHRAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
                * pROHRAttributeAsc: z-order (default: False, i.e. small ones are drawn on top of large ones)
* pROHRAttributeLs (default: '-')
* pROHRAttributeRefSize: plot linewidth in pts for RefSizeValue (default: 1.0)
* pROHRAttributeSizeMin (default: None): if set: use pROHRAttributeSizeMin-Value as Attribute for LineSize if Attribute < pROHRAttributeSizeMin
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
* pROHRAttributeColorMap (default: plt.cm.binary)
                * pROHRAttributeColorMapUsageStart (default: 1./3; value range: [0,1])
                    * color scale follows the existing min./max. value
                    * the color scale is only used from UsageStart onwards
                    * i.e. values that would get a "smaller" color are given the color at UsageStart
ROHR (plot only large (small, medium) pipes ...)
* quantil_pROHRAttributeHigh <= (default: 1.)
* quantil_pROHRAttributeLow >= (default: .75)
* default: only the largest 25% are plotted
* note that Attribute >0 is a precondition
ROHR (PIPE-Marker: Size and Color) - from mx
* pROHRMeasure columnName (default: 'ROHR~*~*~*~QMAV')
* pROHRMeasureApplyFunction: Function to be applied to column pROHRMeasure (default: lambda x: math.fabs(x))
* pROHRMeasureMarker (default: '.')
* pROHRMeasureRefSize: plot markersize for RefSizeValue in pts (default: 1.0)
* pROHRMeasureSizeMin (default: None): if set: use pROHRMeasureSizeMin-Value as Measure for MarkerSize if Measure < pROHRMeasureSizeMin
* corresponding RefSizeValue is Measure.std() or Measure.mean() if Measure.std() is < 1
* if pROHRMeasureRefSize is None: plot markersize will be plot linewidth
* pROHRMeasureColorMap (default: plt.cm.cool)
                * pROHRMeasureColorMapUsageStart (default: 0.; value range: [0,1])
                    * color scale follows the existing min./max. value
                    * the color scale is only used from UsageStart onwards
                    * i.e. values that would get a "smaller" color are given the color at UsageStart
NRCVs - NumeRiCal Values to be displayed
* pFIGNrcv: List of Sir3sID RegExps to be displayed (i.e. ['KNOT~PKON-Knoten~\S*~\S+~QM']) default: None
the 1st Match is used if a RegExp matches more than 1 Channel
further Examples for RegExps (and corresponding Texts):
* WBLZ~WärmeblnzGes~\S*~\S+~WES (Generation)
* WBLZ~WärmeblnzGes~\S*~\S+~WVB (Load)
* WBLZ~WärmeblnzGes~\S*~\S+~WVERL (Loss)
WBLZ~[\S ]+~\S*~\S+~\S+: Example for a RegExp matching all Channels with OBJTYPE WBLZ
* pFIGNrcvTxt: corresponding (same length required!) List of Texts (i.e. ['Kontrolle DH']) default: None
* pFIGNrcvFmt (i.e. '{:12s}: {:8.2f} {:6s}')
* Text (from pFIGNrcvTxt)
* Value
* UNIT (determined from Channel-Data)
* pFIGNrcvPercFmt (i.e. ' {:6.1f}%')
* ValueInRefPercent
* if refValue==0: 100%
* pFIGNrcvXStart (.5 default)
* pFIGNrcvYStart (.5 default)
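                Illustrative sketch (example values reused from above, not defaults):
                    pFIGNrcv=['KNOT~PKON-Knoten~\S*~\S+~QM','WBLZ~WärmeblnzGes~\S*~\S+~WVB']
                    pFIGNrcvTxt=['Kontrolle DH','Load']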
Category - User Heat Balances to be displayed
* pFWVBGCategory: List of Heat Balances to be displayed (i.e. ['BLNZ1u5u7']) default: None
* pFWVBGCategoryUnit: Unit of all these Balances (default: '[kW]'])
* pFWVBGCategoryXStart (.1 default)
* pFWVBGCategoryYStart (.9 default)
* pFWVBGCategoryCatFmt (i.e. '{:12s}: {:6.1f} {:4s}')
* Category NAME
* Category Load
* pFWVBGCategoryUnit
* pFWVBGCategoryPercFmt (i.e. ' {:6.1f}%')
* Last Ist/Soll
* pFWVBGCategory3cFmt (i.e. ' {:5d}/{:5d}/{:5d}')
* NOfTops
* NOfMids
* NOfBots
VICs - VeryImportantCustomers whose Values to be displayed
* pVICsDf: DataFrame with VeryImportantCustomers (Text & Specification)
columns expected:
* Kundenname (i.e. 'VIC1') - Text
* Knotenname (i.e. 'V-K007') - Specification by Supply-Node
i.e.: pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']})
* pVICsPercFmt (i.e. '{:12s}: {:6.1f}%')
* Kundenname
* Load in Percent to Reference
* pVICsFmt (i.e. '{:12s}: {:6.1f} {:6s}')
* Kundenname
* Load
* pFWVBGCategoryUnit
* pVICsXStart (.5 default)
* pVICsYStart (.1 default)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetFigAx')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
Returns:
pFWVB
                * columns changed (compared to vFWVB):
                    * pFWVBAttribute (due to e.g. pFWVBAttributeApplyFunction and .astype(float))
                * columns added (compared to vFWVB):
                    * Measure (in % of Ref if pFWVBMeasureInRefPerc=True)
                    * MeasureRef (value of Measure in the reference state)
                    * MeasureOrig (value of Measure)
                    * MCategory: str (categorization of Measure using the FixedLimitHigh/Low values):
* TopText or
* MidText or
* BotText
* GCategory: list (non-empty only if req. GCategories are a subset of the available Categories and object belongs to a req. Category)
* VIC (filled with Kundenname from pVICsDf)
* rows (compared to vFWVB):
                    * pFWVB contains the same objects as vFWVB
                    * but: the plotted objects may be only a subset (due to e.g. pFWVBFilterFunction)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keysDefined=['CBAnchorHorizontal', 'CBAnchorVertical', 'CBAspect', 'CBFraction', 'CBHpad', 'CBLabelPad'
,'CBLe3cBotVPad', 'CBLe3cMidVPad', 'CBLe3cSySize', 'CBLe3cSyType', 'CBLe3cTopVPad'
,'CBShrink', 'CBTicklabelsHPad'
,'figEdgecolor', 'figFacecolor', 'figFrameon'
,'pFIGNrcv','pFIGNrcvFmt', 'pFIGNrcvPercFmt','pFIGNrcvTxt', 'pFIGNrcvXStart', 'pFIGNrcvYStart'
,'pFWVBFilterFunction'
,'pFWVBAttribute'
,'pFWVBAttributeApplyFunction','pFWVBAttributeApplyFunctionNaNValue'
,'pFWVBAttributeAsc'
,'pFWVBAttributeRefSize'
,'pFWVBGCategory', 'pFWVBGCategoryUnit','pFWVBGCategory3cFmt','pFWVBGCategoryCatFmt', 'pFWVBGCategoryPercFmt', 'pFWVBGCategoryXStart', 'pFWVBGCategoryYStart'
,'pFWVBMeasure', 'pFWVBMeasure3Classes', 'pFWVBMeasureAlpha', 'pFWVBMeasureCBFixedLimitHigh', 'pFWVBMeasureCBFixedLimitLow', 'pFWVBMeasureCBFixedLimits', 'pFWVBMeasureClip', 'pFWVBMeasureColorMap', 'pFWVBMeasureInRefPerc'
,'pMCatBotAlpha', 'pMCatBotClip', 'pMCatBotColor', 'pMCatBotText', 'pMCatMidAlpha', 'pMCatMidClip', 'pMCatMidColorMap', 'pMCatMidText', 'pMCatTopAlpha', 'pMCatTopClip', 'pMCatTopColor', 'pMCatTopText'
,'pROHRFilterFunction'
,'pROHRAttribute'
,'pROHRAttributeApplyFunction','pROHRAttributeApplyFunctionNaNValue'
,'pROHRAttributeAsc', 'pROHRAttributeColorMap', 'pROHRAttributeColorMapUsageStart', 'pROHRAttributeLs', 'pROHRAttributeRefSize','pROHRAttributeSizeMin'
,'pROHRMeasure','pROHRMeasureApplyFunction'
,'pROHRMeasureColorMap', 'pROHRMeasureColorMapUsageStart', 'pROHRMeasureMarker', 'pROHRMeasureRefSize','pROHRMeasureSizeMin'
,'pVICsDf','pVICsPercFmt','pVICsFmt','pVICsXStart', 'pVICsYStart'
,'pltTitle'
,'quantil_pFWVBAttributeHigh', 'quantil_pFWVBAttributeLow'
,'quantil_pROHRAttributeHigh', 'quantil_pROHRAttributeLow'
,'timeDeltaToRef', 'timeDeltaToT']
keys=sorted(kwds.keys())
for key in keys:
if key in keysDefined:
value=kwds[key]
logger.debug("{0:s}kwd {1:s}: {2:s}".format(logStr,key,str(value)))
else:
logger.warning("{0:s}kwd {1:s} NOT defined!".format(logStr,key))
del kwds[key]
# TIMEs
if 'timeDeltaToRef' not in keys:
kwds['timeDeltaToRef']=pd.to_timedelta('0 seconds')
if 'timeDeltaToT' not in keys:
kwds['timeDeltaToT']=pd.to_timedelta('0 seconds')
# FWVB
if 'pFWVBFilterFunction' not in keys:
kwds['pFWVBFilterFunction']=lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK.astype(float)>0)
# FWVB Attribute (Size)
if 'pFWVBAttribute' not in keys:
kwds['pFWVBAttribute']='W0LFK'
if 'pFWVBAttributeApplyFunction' not in keys:
kwds['pFWVBAttributeApplyFunction']=lambda x: pd.to_numeric(x,errors='coerce') # .apply(kwds['pFWVBAttributeApplyFunktion'])
if 'pFWVBAttributeApplyFunctionNaNValue' not in keys:
kwds['pFWVBAttributeApplyFunctionNaNValue']=0 # .fillna(kwds['pFWVBAttributeApplyFunktionNaNValue']).astype(float)
if 'pFWVBAttributeAsc' not in keys:
kwds['pFWVBAttributeAsc']=False
if 'pFWVBAttributeRefSize' not in keys:
kwds['pFWVBAttributeRefSize']=10**2
if 'quantil_pFWVBAttributeHigh' not in keys:
kwds['quantil_pFWVBAttributeHigh']=1.
if 'quantil_pFWVBAttributeLow' not in keys:
kwds['quantil_pFWVBAttributeLow']=.0
# FWVB Measure (Color)
if 'pFWVBMeasure' not in keys:
kwds['pFWVBMeasure']='FWVB~*~*~*~W'
if 'pFWVBMeasureInRefPerc' not in keys:
kwds['pFWVBMeasureInRefPerc']=True
if 'pFWVBMeasureAlpha' not in keys:
kwds['pFWVBMeasureAlpha']=0.9
if 'pFWVBMeasureColorMap' not in keys:
kwds['pFWVBMeasureColorMap']=plt.cm.autumn
if 'pFWVBMeasureClip' not in keys:
kwds['pFWVBMeasureClip']=False
# 3Classes
if 'pFWVBMeasure3Classes' not in keys:
kwds['pFWVBMeasure3Classes']=False
            # CatTexts (used when 3Classes is set to True)
if 'pMCatTopText' not in keys:
kwds['pMCatTopText']='Top'
if 'pMCatMidText' not in keys:
kwds['pMCatMidText']='Middle'
if 'pMCatBotText' not in keys:
kwds['pMCatBotText']='Bottom'
            # CatAttribs (used when 3Classes is set to True)
if 'pMCatTopAlpha' not in keys:
kwds['pMCatTopAlpha']=0.9
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatTopClip' not in keys:
kwds['pMCatTopClip']=False
if 'pMCatMidAlpha' not in keys:
kwds['pMCatMidAlpha']=0.9
if 'pMCatMidColorMap' not in keys:
kwds['pMCatMidColorMap']=plt.cm.autumn
if 'pMCatMidClip' not in keys:
kwds['pMCatMidClip']=False
if 'pMCatBotAlpha' not in keys:
kwds['pMCatBotAlpha']=0.9
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
if 'pMCatBotClip' not in keys:
kwds['pMCatBotClip']=False
# CBFixedLimits
if 'pFWVBMeasureCBFixedLimits' not in keys:
kwds['pFWVBMeasureCBFixedLimits']=False
if 'pFWVBMeasureCBFixedLimitLow' not in keys:
kwds['pFWVBMeasureCBFixedLimitLow']=.10
if 'pFWVBMeasureCBFixedLimitHigh' not in keys:
kwds['pFWVBMeasureCBFixedLimitHigh']=.95
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
if 'CBLabelPad' not in keys:
kwds['CBLabelPad']=-50
if 'CBTicklabelsHPad' not in keys:
kwds['CBTicklabelsHPad']=0
if 'CBAspect' not in keys:
kwds['CBAspect']=10.
if 'CBShrink' not in keys:
kwds['CBShrink']=0.3
if 'CBAnchorHorizontal' not in keys:
kwds['CBAnchorHorizontal']=0.
if 'CBAnchorVertical' not in keys:
kwds['CBAnchorVertical']=0.2
# CBLegend (3Classes)
if 'CBLe3cTopVPad' not in keys:
kwds['CBLe3cTopVPad']=1+1*1/4
if 'CBLe3cMidVPad' not in keys:
kwds['CBLe3cMidVPad']=.5
if 'CBLe3cBotVPad' not in keys:
kwds['CBLe3cBotVPad']=0-1*1/4
if 'CBLe3cSySize' not in keys:
kwds['CBLe3cSySize']=10**2
if 'CBLe3cSyType' not in keys:
kwds['CBLe3cSyType']='o'
# ROHR
if 'pROHRFilterFunction' not in keys:
kwds['pROHRFilterFunction']=lambda df: (df.KVR.astype(int).isin([2])) & (df.CONT_ID.astype(int).isin([1001])) & (df.DI.astype(float)>0)
# pROHR (PIPE-Line: Size and Color)
if 'pROHRAttribute' not in keys:
kwds['pROHRAttribute']='DI'
if 'pROHRAttributeApplyFunction' not in keys:
kwds['pROHRAttributeApplyFunction']=lambda x: pd.to_numeric(x,errors='coerce') # .apply(kwds['pROHRAttributeApplyFunktion'])
if 'pROHRAttributeApplyFunctionNaNValue' not in keys:
kwds['pROHRAttributeApplyFunctionNaNValue']=0 # .fillna(kwds['pROHRAttributeApplyFunktionNaNValue']).astype(float)
if 'pROHRAttributeAsc' not in keys:
kwds['pROHRAttributeAsc']=False
if 'pROHRAttributeLs' not in keys:
kwds['pROHRAttributeLs']='-'
if 'pROHRAttributeRefSize' not in keys:
kwds['pROHRAttributeRefSize']=1.
if 'pROHRAttributeSizeMin' not in keys:
kwds['pROHRAttributeSizeMin']=None
if 'pROHRAttributeColorMap' not in keys:
kwds['pROHRAttributeColorMap']=plt.cm.binary
if 'pROHRAttributeColorMapUsageStart' not in keys:
kwds['pROHRAttributeColorMapUsageStart']=1./3.
if 'quantil_pROHRAttributeHigh' not in keys:
kwds['quantil_pROHRAttributeHigh']=1.
if 'quantil_pROHRAttributeLow' not in keys:
kwds['quantil_pROHRAttributeLow']=.75
# pROHR (PIPE-Marker: Size and Color)
if 'pROHRMeasure' not in keys:
kwds['pROHRMeasure']='ROHR~*~*~*~QMAV'
if 'pROHRMeasureApplyFunction' not in keys:
kwds['pROHRMeasureApplyFunction']=lambda x: math.fabs(x)
if 'pROHRMeasureMarker' not in keys:
kwds['pROHRMeasureMarker']='.'
if 'pROHRMeasureRefSize' not in keys:
kwds['pROHRMeasureRefSize']=1.0
if 'pROHRMeasureSizeMin' not in keys:
kwds['pROHRMeasureSizeMin']=None
if 'pROHRMeasureColorMap' not in keys:
kwds['pROHRMeasureColorMap']=plt.cm.cool
if 'pROHRMeasureColorMapUsageStart' not in keys:
kwds['pROHRMeasureColorMapUsageStart']=0.
# NRCVs to be displayed
if 'pFIGNrcv' not in keys:
kwds['pFIGNrcv']=None #['KNOT~PKON-Knoten~\S*~\S+~QM']
if 'pFIGNrcvTxt' not in keys:
kwds['pFIGNrcvTxt']=None #['Kontrolle DH']
if 'pFIGNrcvFmt' not in keys:
kwds['pFIGNrcvFmt']='{:12s}: {:8.2f} {:6s}'
if 'pFIGNrcvPercFmt' not in keys:
kwds['pFIGNrcvPercFmt']=' {:6.1f}%'
if 'pFIGNrcvXStart' not in keys:
kwds['pFIGNrcvXStart']=.5
if 'pFIGNrcvYStart' not in keys:
kwds['pFIGNrcvYStart']=.5
# User Heat Balances to be displayed
if 'pFWVBGCategory' not in keys:
kwds['pFWVBGCategory']=None #['BLNZ1u5u7']
if 'pFWVBGCategoryUnit' not in keys:
kwds['pFWVBGCategoryUnit']='[kW]'
if 'pFWVBGCategoryCatFmt' not in keys:
kwds['pFWVBGCategoryCatFmt']='{:12s}: {:6.1f} {:4s}'
if 'pFWVBGCategoryPercFmt' not in keys:
kwds['pFWVBGCategoryPercFmt']=' {:6.1f}%'
if 'pFWVBGCategory3cFmt' not in keys:
kwds['pFWVBGCategory3cFmt']=' {:5d}/{:5d}/{:5d}'
if 'pFWVBGCategoryXStart' not in keys:
kwds['pFWVBGCategoryXStart']=.1
if 'pFWVBGCategoryYStart' not in keys:
kwds['pFWVBGCategoryYStart']=.9
# VICs
if 'pVICsDf' not in keys:
kwds['pVICsDf']=None #pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']})
if 'pVICsPercFmt' not in keys:
kwds['pVICsPercFmt']='{:12s}: {:6.1f}%'
if 'pVICsFmt' not in keys:
kwds['pVICsFmt']='{:12s}: {:6.1f} {:6s}'
if 'pVICsXStart' not in keys:
kwds['pVICsXStart']=.5
if 'pVICsYStart' not in keys:
kwds['pVICsYStart']=.1
# Figure
if 'pltTitle' not in keys:
kwds['pltTitle']='pltNetDHUS'
if 'figFrameon' not in keys:
kwds['figFrameon']=True
if 'figEdgecolor' not in keys:
kwds['figEdgecolor']='black'
if 'figFacecolor' not in keys:
kwds['figFacecolor']='white'
# Plausis
if kwds['pFWVBMeasure3Classes'] and not kwds['pFWVBMeasureCBFixedLimits']:
kwds['pFWVBMeasureCBFixedLimits']=True
logger.debug("{0:s}kwd {1:s} set to {2:s} because kwd {3:s}={4:s}".format(logStr,'pFWVBMeasureCBFixedLimits',str(kwds['pFWVBMeasureCBFixedLimits']),'pFWVBMeasure3Classes',str(kwds['pFWVBMeasure3Classes'])))
keys = sorted(kwds.keys())
logger.debug("{0:s}keys: {1:s}".format(logStr,str(keys)))
for key in keys:
value=kwds[key]
logger.debug("{0:s}kwd {1:s}: {2:s}".format(logStr,key,str(value)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
try:
            # determine the 2 scenario times ===============================================
firstTime=self.mx.df.index[0]
if isinstance(kwds['timeDeltaToRef'],pd.Timedelta):
timeRef=firstTime+kwds['timeDeltaToRef']
else:
logStrFinal="{:s}{:s} not Type {:s}.".format(logStr,'timeDeltaToRef','pd.Timedelta')
logger.error(logStrFinal)
raise RmError(logStrFinal)
if isinstance(kwds['timeDeltaToT'],pd.Timedelta):
timeT=firstTime+kwds['timeDeltaToT']
else:
logStrFinal="{:s}{:s} not Type {:s}.".format(logStr,'timeDeltaToT','pd.Timedelta')
logger.error(logStrFinal)
raise RmError(logStrFinal)
            # fetch the vector results for the 2 times ===============================================
timesReq=[]
timesReq.append(timeRef)
timesReq.append(timeT)
plotTimeDfs=self.mx.getMxsVecsFileData(timesReq=timesReq)
timeRefIdx=0
timeTIdx=1
            # master data basis ===============================================
vROHR=self.xm.dataFrames['vROHR']
vKNOT=self.xm.dataFrames['vKNOT']
vFWVB=self.xm.dataFrames['vFWVB']
vNRCV_Mx1=self.xm.dataFrames['vNRCV_Mx1']
if isinstance(kwds['pVICsDf'],pd.core.frame.DataFrame):
vFWVB=vFWVB.merge(kwds['pVICsDf'],left_on='NAME_i',right_on='Knotenname',how='left')
vFWVB.rename(columns={'Kundenname':'VIC'},inplace=True)
vFWVB.drop('Knotenname',axis=1,inplace=True)
            # determine the unit of the measures (for annotations)
pFWVBMeasureCh=self.mx.mx1Df[self.mx.mx1Df['Sir3sID'].str.startswith(kwds['pFWVBMeasure'])]
pFWVBMeasureUNIT=pFWVBMeasureCh.iloc[0].UNIT
pFWVBMeasureATTRTYPE=pFWVBMeasureCh.iloc[0].ATTRTYPE
pROHRMeasureCh=self.mx.mx1Df[self.mx.mx1Df['Sir3sID'].str.startswith(kwds['pROHRMeasure'])]
pROHRMeasureUNIT=pROHRMeasureCh.iloc[0].UNIT
pROHRMeasureATTRTYPE=pROHRMeasureCh.iloc[0].ATTRTYPE
            # annotate the master data with a Measure column
# FWVB
pFWVBMeasureValueRaw=plotTimeDfs[timeTIdx][kwds['pFWVBMeasure']].iloc[0]
pFWVBMeasureValueRefRaw=plotTimeDfs[timeRefIdx][kwds['pFWVBMeasure']].iloc[0]
pFWVBMeasureValue=[None for m in pFWVBMeasureValueRaw]
pFWVBMeasureValueRef=[None for m in pFWVBMeasureValueRefRaw]
for idx in range(len(pFWVBMeasureValueRaw)):
mx2Idx=vFWVB['mx2Idx'].iloc[idx]
m=pFWVBMeasureValueRaw[mx2Idx]
pFWVBMeasureValue[idx]=m
m=pFWVBMeasureValueRefRaw[mx2Idx]
pFWVBMeasureValueRef[idx]=m
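                # mx2Idx maps each vFWVB row to its position in the MX result vector, so the raw scenario
                # and reference values are collected in vFWVB row order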
if kwds['pFWVBMeasureInRefPerc']: # auch in diesem Fall traegt die Spalte Measure das Ergebnis
pFWVBMeasureValuePerc=[float(m)/float(mRef) if float(mRef) >0 else 1 for m,mRef in zip(pFWVBMeasureValue,pFWVBMeasureValueRef)]
pFWVB=vFWVB.assign(Measure=pd.Series(pFWVBMeasureValuePerc)) #!
else:
                    pFWVB=vFWVB.assign(Measure=pd.Series(pFWVBMeasureValue))
import pandas as pd
import os
from metaquantome.util.utils import DATA_DIR
import numpy as np
def write_testfile(df, name):
df.to_csv(os.path.join(DATA_DIR, 'test', name), sep='\t', index_label='peptide')
# simple: single intensity
func = pd.DataFrame({'go': ['GO:0008152', 'GO:0022610']}, index=['A', 'B'])
write_testfile(func, 'simple_func.tab')
ec = pd.DataFrame({'ec': ['3.4.11.-', '172.16.58.3']}, index=['A', 'B'])
#!/usr/bin/env python
# coding: utf-8
# In[5]:
#!/usr/bin/env python
# coding: utf-8
# In[11]:
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from scipy.optimize import fmin_l_bfgs_b
from sklearn.metrics import mean_squared_error
from math import sqrt
#__________________TO EXCEL____________________#
import os
path = os.getcwd()
print("Current Directory", path)
# goTo parent directory
parent = os.path.abspath(os.path.join(path, os.pardir))
print(parent)
os.chdir(parent)
import xlsxwriter
workbook = xlsxwriter.Workbook()
def to_excel(data):
months = ('January', 'February', 'March',
'April','May', 'June', 'July',
'August', 'September', 'October',
'November','December'
)
workbook = xlsxwriter.Workbook('data\\CreatedByCode.xlsx')
ws = workbook.add_worksheet()
ws.write("A1", "Months")
ws.write("B1", "Values")
for i in range(len(data)):
#Months
ws.write("A{0}".format(i + 2), months[i % 12])
#Data
ws.write("B{0}".format(i + 2), data[i])
workbook.close()
#---------------------------------------------------------------------------
def Lissage():
sdata = open("C:\\Users\\SOS\\PycharmProjects\\pythonProject2\\data\\sampledata.csv")
tsA = sdata.read().split('\n')
tsA = list(map(int, tsA))
#-----------------------------------------------------------------------------------
def holtWinters(ts, p, sp, ahead, mtype, alpha = None, beta = None, gamma = None):
a, b, s = _initValues(mtype, ts, p, sp)
if alpha == None or beta == None or gamma == None:
ituning = [0.1, 0.1, 0.1]
ibounds = [(0,1), (0,1), (0,1)]
optimized = fmin_l_bfgs_b(_MSD, ituning, args = (mtype, ts, p, a, b, s[:]), bounds = ibounds, approx_grad = True)
alpha, beta, gamma = optimized[0]
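        # the tuned alpha/beta/gamma minimize the mean squared deviation (_MSD), each bounded to [0,1] by L-BFGS-B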
MSD, params, smoothed = _expSmooth(mtype, ts, p, a, b, s[:], alpha, beta, gamma)
predicted = _predictValues(mtype, p, ahead, params)
return {'alpha': alpha, 'beta': beta, 'gamma': gamma, 'MSD': MSD, 'params': params, 'smoothed': smoothed, 'predicted': predicted}
def _initValues(mtype, ts, p, sp):
'''subroutine to calculate initial parameter values (a, b, s) based on a fixed number of starting periods'''
initSeries = pd.Series(ts[:p*sp])
if mtype == 'additive':
rawSeason = initSeries - initSeries.rolling(min_periods = p, window = p, center = True).mean()
initSeason = [np.nanmean(rawSeason[i::p]) for i in range(p)]
initSeason = pd.Series(initSeason) - np.mean(initSeason)
deSeasoned = [initSeries[v] - initSeason[v % p] for v in range(len(initSeries))]
else:
rawSeason = initSeries / initSeries.rolling(min_periods = p, window = p, center = True).mean()
initSeason = [np.nanmean(rawSeason[i::p]) for i in range(p)]
initSeason = pd.Series(initSeason) / math.pow(np.prod(np.array(initSeason)), 1/p)
deSeasoned = [initSeries[v] / initSeason[v % p] for v in range(len(initSeries))]
lm = linear_model.LinearRegression()
    lm.fit(pd.DataFrame({'time': [t+1 for t in range(len(initSeries))]}), pd.Series(deSeasoned))
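    # the linear fit on the deseasonalized series presumably provides the initial level (intercept) and trend (slope)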
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 3 14:47:20 2017
@author: Flamingo
"""
#%%
from bs4 import BeautifulSoup
import urllib
import pandas as pd
import numpy as np
CITY_NAME = pd.read_csv('CITY_NAME2.csv')
import numpy as np
import pandas as pd
from scipy.stats import mode
from tqdm import tqdm
from geopy.geocoders import Nominatim
from datetime import datetime
def handle_bornIn(x):
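    # age in 2012 relative to the given birth year; malformed entries ('16-Mar', '23-May', 'None') fall back to 23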
skip_vals = ['16-Mar', '23-May', 'None']
if x not in skip_vals:
return datetime(2012, 1, 1).year - datetime(int(x), 1, 1).year
else:
return 23
def handle_gender(x):
if x == 'male':
return 1
else:
return 0
def handle_memberSince(x):
skip_vals = ['--None']
if pd.isna(x):
return datetime(2012, 1, 1)
elif x not in skip_vals:
return datetime.strptime(x, '%d-%m-%Y')
else:
return datetime(2012, 1, 1)
def process_tours_df(data_content):
dtype = {}
cols = data_content.tours_df.columns[9:]
for d in cols:
dtype[d] = np.int16
data_content.tours_df = data_content.tours_df.astype(dtype)
data_content.tours_df['area'] = data_content.tours_df['city'] + ' ' + data_content.tours_df['state'] + ' ' + \
data_content.tours_df['pincode'] + ' ' + data_content.tours_df['country']
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.lstrip() if type(x) == str else x)
data_content.tours_df['area'] = data_content.tours_df['area'].apply(lambda x: x.rstrip() if type(x) == str else x)
data_content.tours_df.drop(['city', 'state', 'pincode', 'country'], axis=1, inplace=True)
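    # parse 'DD-MM-YYYY' tour dates into end-of-day (23:59) timestamps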
data_content.tours_df['tour_date'] = data_content.tours_df['tour_date'].apply(
lambda x: datetime(int(x.split('-')[2]), int(x.split('-')[1]), int(x.split('-')[0]), 23, 59))
def process_tour_convoy_df(data_content):
print('Initializing tour_convoy_df...', flush=True)
data_content.tour_convoy_df['total_going'] = 0
data_content.tour_convoy_df['total_not_going'] = 0
data_content.tour_convoy_df['total_maybe'] = 0
data_content.tour_convoy_df['total_invited'] = 0
data_content.tour_convoy_df['fraction_going'] = 0
data_content.tour_convoy_df['fraction_not_going'] = 0
data_content.tour_convoy_df['fraction_maybe'] = 0
known_bikers = set()
lis = ['going', 'not_going', 'maybe', 'invited']
pbar = tqdm(total=data_content.tour_convoy_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 1 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
s = [0, 0, 0]
for j, l in enumerate(lis):
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
data_content.tour_convoy_df.loc[idx, 'total_' + l] = len(biker)
if j != 3:
s[j] = len(biker)
for bik in biker:
known_bikers.add(bik)
if sum(s) != 0:
for j in range(3):
data_content.tour_convoy_df.loc[idx, 'fraction_' + lis[j]] = s[j] / sum(s)
pbar.update(1)
pbar.close()
mean = data_content.tour_convoy_df['total_invited'].mean()
std = data_content.tour_convoy_df['total_invited'].std()
data_content.tour_convoy_df['fraction_invited'] = data_content.tour_convoy_df['total_invited'].apply(
lambda x: (x - mean) / std)
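    # standardize the invite counts (z-score) so they are comparable across tours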
biker_tour_convoy_df = dict()
for biker in list(known_bikers):
biker_tour_convoy_df[biker] = [[], [], [], []]
pbar = tqdm(total=data_content.tour_convoy_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 2")
for idx, _ in data_content.tour_convoy_df.iterrows():
for l in lis:
if not pd.isna(data_content.tour_convoy_df.loc[idx, l]):
biker = data_content.tour_convoy_df.loc[idx, l].split()
for bik in biker:
biker_tour_convoy_df[bik][lis.index(l)] += \
[data_content.tour_convoy_df.loc[idx, 'tour_id']]
pbar.update(1)
pbar.close()
for key, _ in biker_tour_convoy_df.items():
for i in range(4):
biker_tour_convoy_df[key][i] = ' '.join(list(set(biker_tour_convoy_df[key][i])))
biker_tour_convoy_df = pd.DataFrame.from_dict(biker_tour_convoy_df, orient='index')
biker_tour_convoy_df.reset_index(inplace=True)
biker_tour_convoy_df.columns = ['biker_id'] + lis
print('tour_convoy_df ready...', flush=True)
return biker_tour_convoy_df
def get_coordinates(locations, data_content):
geolocation_map = {}
locator = Nominatim(user_agent="Kolibri")
for i in tqdm(range(len(locations)),
disable=False,
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# noinspection PyBroadException
try:
location = locator.geocode(locations[i])
geolocation_map[locations[i]] = [location.latitude, location.longitude]
except:
# Called when there is presumably some noise in the Address location
# noinspection PyBroadException
data_content.noise += [locations[i]]
geolocation_map[locations[i]] = [np.nan, np.nan]
location_df = pd.DataFrame({'location': list(locations),
'latitude': np.array(list(geolocation_map.values()))[:, 0],
'longitude': np.array(list(geolocation_map.values()))[:, 1]})
return geolocation_map, location_df
def initialize_locations(data_content):
# noinspection PyBroadException
try:
location_df = pd.read_csv(data_content.base_dir + 'temp/location.csv')
location_from_csv = True
except:
location_df = None
location_from_csv = False
if location_from_csv:
geolocation = {}
print('Initializing Locations from DataFrame...', flush=True)
for i, l in enumerate(location_df['location'].tolist()):
geolocation[l] = [location_df.loc[i, 'latitude'], location_df.loc[i, 'longitude']]
else:
print('Initializing Locations from Nominatim...', flush=True)
biker_location = data_content.bikers_df['area'].dropna().drop_duplicates().tolist()
geolocation, location_df = get_coordinates(biker_location, data_content)
return geolocation, location_df
def impute_location_from_tour_convoy(data_content):
# From tour_convoy
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.convoy_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
groups = ['going', 'not_going', 'maybe', 'invited']
rest_trs = data_content.tours_df[data_content.tours_df['tour_id'].isin(
data_content.tour_convoy_df['tour_id'])]
rest_con = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(org_bik)]
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
cdf = rest_con[rest_con['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if cdf.shape[0] > 0:
tours = []
for g in groups:
tours += cdf[g].tolist()[0].split()
tours = (' '.join(tours)).split()
trs = rest_trs[rest_trs['tour_id'].isin(tours)]
if trs.shape[0] > 0:
m, _ = mode(trs[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = trs[trs['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = trs.loc[index, 'latitude'], trs.loc[index, 'longitude']
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_location_from_tours(data_content):
# From tours_df
unk_loc = data_content.bikers_df[pd.isna(data_content.bikers_df['latitude'])]
org_bik = list(set(data_content.tours_df['biker_id'].drop_duplicates().tolist()).intersection(
data_content.bikers_df['biker_id'].tolist()))
pbar = tqdm(total=unk_loc.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, _ in unk_loc.iterrows():
if unk_loc.loc[idx, 'biker_id'] in org_bik:
tours = data_content.tours_df[data_content.tours_df['biker_id'] == unk_loc.loc[idx, 'biker_id']]
if tours.shape[0] > 0:
m, _ = mode(tours[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = tours[tours['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = tours.loc[index, 'latitude'], tours.loc[index, 'longitude']
if not np.isnan(lat):
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def impute_location_from_friends(data_content):
biker_df = pd.merge(data_content.bikers_df,
data_content.bikers_network_df, on='biker_id', how='left').copy()
bikers_df_ids = set(data_content.bikers_df['biker_id'].tolist())
# From friends
for i in range(data_content.location_recursion):
pbar = tqdm(total=biker_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step ' + str(data_content.current_step) + ' of ' + str(data_content.total_steps))
for idx, rows in biker_df.iterrows():
if not pd.isna(biker_df.loc[idx, 'friends']):
bikers_known_friends = set(biker_df.loc[idx, 'friends'].split()).intersection(bikers_df_ids)
if len(bikers_known_friends) >= data_content.member_threshold:
temp_df = biker_df[biker_df['biker_id'].isin(bikers_known_friends)].dropna()
if temp_df.shape[0] > 0:
m, _ = mode(temp_df[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = temp_df[temp_df['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = temp_df.loc[index, 'latitude'], temp_df.loc[index, 'longitude']
if pd.isna(data_content.bikers_df.loc[idx, 'latitude']):
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
elif not np.isnan(lat):
dist = (data_content.bikers_df.loc[idx, 'latitude'] - lat) ** 2 + \
(data_content.bikers_df.loc[idx, 'longitude'] - long) ** 2
if (dist ** 0.5) > data_content.gps_threshold:
data_content.bikers_df.loc[idx, 'latitude'] = lat
data_content.bikers_df.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
data_content.current_step += 1
def fill_missing_locations(data_content):
    impute_location_from_friends(data_content)
impute_location_from_tours(data_content)
impute_location_from_tour_convoy(data_content)
def handle_locations(data_content):
print('Preprocessing bikers_df..', flush=True)
print('Initializing Locations...', flush=True)
geolocation, location_df = initialize_locations(data_content)
loc = set(location_df['location'].tolist())
for i in tqdm(range(data_content.bikers_df.shape[0]),
disable=False, desc='Step 1 of ' + str(data_content.total_steps),
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
if data_content.bikers_df.loc[i, 'area'] in loc:
data_content.bikers_df.loc[i, 'latitude'] = geolocation[data_content.bikers_df.loc[i, 'area']][0]
data_content.bikers_df.loc[i, 'longitude'] = geolocation[data_content.bikers_df.loc[i, 'area']][1]
data_content.current_step += 1
# Imputing Missing Locations
fill_missing_locations(data_content)
print('Locations Initialized...', flush=True)
print('bikers_df ready', flush=True)
def time_zone_converter(data_content):
for idx, _ in data_content.bikers_df.iterrows():
if not np.isnan(data_content.bikers_df.loc[idx, 'longitude']):
x = data_content.bikers_df.loc[idx, 'longitude']
data_content.bikers_df.loc[idx, 'time_zone'] = (np.floor((x - 7.500000001) / 15) + 1) * 60
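# Worked example of the offset formula above: longitude is binned into 15-degree
# slices centred on the prime meridian and expressed as minutes from UTC.
#   longitude 139.69 (Tokyo): (floor((139.69 - 7.5) / 15) + 1) * 60 = 9 * 60 = 540   (UTC+9)
#   longitude -74.00 (NYC):   (floor((-74.00 - 7.5) / 15) + 1) * 60 = -5 * 60 = -300 (UTC-5)
# The 7.500000001 shift simply keeps values off the exact bin edges.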
def time_zone_for_location_imputation(data_content):
timezones = np.unique(data_content.bikers_df['time_zone'].drop_duplicates().dropna())
tz = dict()
for time in timezones:
df = data_content.bikers_df[data_content.bikers_df['time_zone'] == time]
m, _ = mode(df[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = df[df['latitude'] == m[0, 0]].index.tolist()[0]
lat, long, = df.loc[index, 'latitude'], df.loc[index, 'longitude']
tz[time] = [lat, long]
data_content.bikers_df['time_zone'] = data_content.bikers_df['time_zone'].map(
lambda x: x if x in timezones else np.nan)
df = data_content.bikers_df[(pd.isna(data_content.bikers_df['latitude'])) & (
pd.notna(data_content.bikers_df['time_zone']))]
for idx, _ in df.iterrows():
key = df.loc[idx, 'time_zone']
if key in tz.keys():
data_content.bikers_df.loc[idx, 'latitude'] = tz[key][0]
data_content.bikers_df.loc[idx, 'longitude'] = tz[key][1]
def language_for_location_imputation(data_content):
df = data_content.bikers_df[(pd.isna(data_content.bikers_df['latitude']))]
for idx, _ in df.iterrows():
location = data_content.locale_[data_content.bikers_df.loc[idx, 'language']][3]
data_content.bikers_df.loc[idx, 'latitude'] = location[0]
data_content.bikers_df.loc[idx, 'longitude'] = location[1]
def compute_non_pop(tdf, data_content):
dont_pop = tdf[(pd.isna(tdf['latitude']))]['biker_id'].tolist()
cat = ['going', 'maybe', 'invited', 'not_going']
pbar = tqdm(total=tdf.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 1 of 4')
for idx, _ in tdf.iterrows():
bik = data_content.tour_convoy_df[data_content.tour_convoy_df['tour_id'] == tdf.loc[idx, 'tour_id']]
coll = []
for c in cat:
if not pd.isna(bik[c].tolist()[0]):
coll += bik[c].tolist()[0].split()
dont_pop += coll
pbar.update(1)
pbar.close()
dont_pop = list(set(dont_pop))
return dont_pop
def initialize_network_dict(data_content):
network = {}
pbar = tqdm(total=data_content.bikers_network_df.shape[0],
bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description("Step 2 of 4")
for idx, _ in data_content.bikers_network_df.iterrows():
bik_id = data_content.bikers_network_df.loc[idx, 'biker_id']
if not pd.isna(data_content.bikers_network_df.loc[idx, 'friends']):
bik = data_content.bikers_network_df.loc[idx, 'friends'].split()
if bik_id in network:
network[bik_id] = network[bik_id] + list(bik)
else:
network[bik_id] = list(bik)
for biker in bik:
if biker in network:
network[biker] = network[biker] + [bik_id]
else:
network[biker] = [bik_id]
pbar.update(1)
pbar.close()
return network
def process_bikers_network_df(tdf, data_content):
dont_pop = compute_non_pop(tdf, data_content)
network = initialize_network_dict(data_content)
pop_list = list(set(network.keys()) - set(dont_pop))
for ele in pop_list:
network.pop(ele)
for key, _ in network.items():
network[key] = ' '.join(list(set(network[key])))
network_df = pd.DataFrame.from_dict(network, orient='index')
network_df.reset_index(inplace=True)
network_df.columns = ['biker_id', 'friends']
return network_df
def fill_network_df(network_df, data_content):
network_df = pd.merge(network_df, data_content.bikers_df[['biker_id', 'latitude', 'longitude']], on='biker_id',
how='left')
network_df['friends'] = network_df['friends'].apply(lambda x: x.split()[0])
get_frnds = list(set(network_df['friends'].tolist()).intersection(data_content.bikers_df['biker_id'].tolist()))
grouped = network_df.groupby(by='friends')
small_df = data_content.bikers_df[data_content.bikers_df['biker_id'].isin(get_frnds)]
pbar = tqdm(total=small_df.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 3 of 4')
for idx, _ in small_df.iterrows():
i = grouped.get_group(small_df.loc[idx, 'biker_id']).index
network_df.loc[i, 'latitude'] = small_df.loc[idx, 'latitude']
network_df.loc[i, 'longitude'] = small_df.loc[idx, 'longitude']
pbar.update(1)
pbar.close()
return network_df
def fill_location_for_tours_df(tdf, network_df, data_content):
tid = tdf[pd.isna(tdf['latitude'])]
pbar = tqdm(total=tid.shape[0], bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')
pbar.set_description('Step 4 of 4')
for idx, _ in tid.iterrows():
cat = ['going', 'maybe', 'invited', 'not_going']
bik = data_content.tour_convoy_df[data_content.tour_convoy_df['tour_id'] == tdf.loc[idx, 'tour_id']]
coll = []
for c in cat:
if not pd.isna(bik[c].tolist()[0]):
coll += bik[c].tolist()[0].split()
g = network_df[network_df['biker_id'].isin(coll)]
if g.shape[0] > 0:
m, _ = mode(g[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = g[g['latitude'] == m[0, 0]].index.tolist()[0]
lat, long = g.loc[index, 'latitude'], g.loc[index, 'longitude']
tdf.loc[idx, 'latitude'] = lat
tdf.loc[idx, 'longitude'] = long
pbar.update(1)
pbar.close()
bid = tdf[pd.isna(tdf['latitude'])]['biker_id'].drop_duplicates().tolist()
chi = data_content.tours_df[data_content.tours_df['biker_id'].isin(bid)]
chi = chi[pd.notna(chi['latitude'])].groupby('biker_id')[['latitude', 'longitude']].agg(
lambda x: x.value_counts().index[0])
chi = chi.reset_index()
for idx, _ in tdf[pd.isna(tdf['latitude'])].iterrows():
m = chi[chi['biker_id'] == tdf.loc[idx, 'biker_id']]
if m.shape[0] != 0:
tdf.loc[idx, 'latitude'] = m['latitude'].tolist()[0]
tdf.loc[idx, 'longitude'] = m['longitude'].tolist()[0]
    # Use convoy_df to find tours attended by the biker organizing this tour
    # and fill in the location based on that information.
coll = []
tid = tdf[pd.isna(tdf['latitude'])]
sdf = data_content.convoy_df[data_content.convoy_df['biker_id'].isin(tid['biker_id'].tolist())]
for idx, _ in tid.iterrows():
cat = ['going', 'maybe', 'invited', 'not_going']
bik = sdf[sdf['biker_id'] == tid.loc[idx, 'biker_id']]
if bik.shape[0] > 0:
for c in cat:
if not pd.isna(bik[c].tolist()[0]):
coll += bik[c].tolist()[0].split()
small_df = data_content.tours_df[data_content.tours_df['tour_id'].isin(coll)]
for idx, _ in tid.iterrows():
cat = ['going', 'maybe', 'invited', 'not_going']
bik = sdf[sdf['biker_id'] == tdf.loc[idx, 'biker_id']]
if bik.shape[0] > 0:
coll = []
for c in cat:
if not pd.isna(bik[c].tolist()[0]):
coll += bik[c].tolist()[0].split()
g = small_df[small_df['tour_id'].isin(coll)]
if g.shape[0] > 0:
m, _ = mode(g[['latitude']], axis=0)
if not np.isnan(m[0, 0]):
index = g[g['latitude'] == m[0, 0]].index.tolist()[0]
lat, long = g.loc[index, 'latitude'], g.loc[index, 'longitude']
tdf.loc[idx, 'latitude'] = lat
tdf.loc[idx, 'longitude'] = long
return tdf
def process_bikers_df(data_content):
data_content.bikers_df['area'] = data_content.bikers_df['area'].apply(
lambda x: x.replace(' ', ', ') if not pd.isna(x) else x)
data_content.bikers_df['language'] = data_content.bikers_df['language_id'] + \
'_' + data_content.bikers_df['location_id']
data_content.bikers_df['age'] = data_content.bikers_df['bornIn'].apply(
lambda x: handle_bornIn(x))
data_content.bikers_df['gender'] = data_content.bikers_df['gender'].apply(
lambda x: handle_gender(x))
data_content.bikers_df['member_since'] = data_content.bikers_df['member_since'].apply(
lambda x: handle_memberSince(x))
handle_locations(data_content)
time_zone_converter(data_content)
time_zone_for_location_imputation(data_content)
language_for_location_imputation(data_content)
time_zone_converter(data_content)
data_content.bikers_df.drop(['bornIn', 'area', 'language_id', 'location_id'], axis=1, inplace=True)
# noinspection PyBroadException
try:
        tdf = pd.read_csv(data_content.base_dir + 'temp/tdf.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 21:02:58 2020
@author: RMS671214
"""
from faspy.interestrate.fixincome import date_structures, calc_customfix_structures, \
value_customfix_structures
import numpy as np
from faspy.interestrate import rmp_dates as rd
from faspy.interestrate import discount_curve as dcurve
# %%
mybond = {}
mybond['issue_date'] = np.datetime64('2018-10-22')
mybond['value_date'] = np.datetime64('2021-10-22')
mybond['maturity'] = np.datetime64('2028-10-22')
mybond['day_count'] = 'Actual/365 Fixed'
mybond['frequency'] = 'Semi-Annual'
mybond['business_day'] = 'No Adjustment'
mybond['date_generation'] = rd.date_gen_method[1]
mybond['face_value'] = 1000000
mybond['coupon'] = 10
mybond['ytm'] = None
mybond['type'] = 'Fixed Rate Bond'
structures = list(date_structures(mybond))
print("DATES")
print("=====")
print(structures)
try:
import pandas as pd
pd1 = pd.DataFrame(structures)
except:
pass
# %%
# Create Sample structure
lenstruc = len(structures)
for i in range(lenstruc):
structure = structures[i]
if i == 0:
structure["face_value"] = 10_000_000
structure["fv_flow"] = 1_000_000
structure["coupon"] = 5.00
else:
prevstruc = structures[i-1]
structure["face_value"] = prevstruc["face_value"] + prevstruc["fv_flow"]
structure["fv_flow"] = 1_000_000
structure["coupon"] = prevstruc["coupon"] + 0.25
structures[-1]["fv_flow"] = structures[-1]["face_value"]
new_structures = calc_customfix_structures(structures,
mybond["day_count"],
mybond["frequency"],
mybond["business_day"])
print("CUSTOM STRUCTURE")
print("================")
print(new_structures)
try:
import pandas as pd
    pd2 = pd.DataFrame(new_structures)
import pandas as pd
import streamlit as st
import plotly.express as px
@st.cache
def load_data(file):
data = pd.read_csv(file,
na_filter=True,
na_values=[' -', '-'],
keep_default_na=False)
return data
#####################
### HTML SETTINGS ###
#####################
ucloud_color = '#006AFF'
boston_color = '#febb19'
body_color = '#F5FFFA'
header_color = 'black'
subheader_color = '#c00'
code_color = '#c00'
plt_bkg_color = body_color
header = '<style>h1{color: %s;}</style>' % (header_color)
subheader = '<style>h2{color: %s;}</style>' % (subheader_color)
body = '<style>body{background-color: %s;}</style>' % (body_color)
code = '<style>code{color: %s; }</style>' % (code_color)
sidebar = """
<style>
# .reportview-container {
# flex-direction: row-reverse;
# }
# header > .toolbar {
# flex-direction: row-reverse;
# left: 1rem;
# right: auto;
# }
# .sidebar .sidebar-collapse-control,
# .sidebar.--collapsed .sidebar-collapse-control {
# left: auto;
# right: 0.5rem;
# }
.sidebar .sidebar-content {
transition: margin-right .3s, box-shadow .3s;
background-image: linear-gradient(180deg,%s,%s);
width: 20rem;
}
# .sidebar.--collapsed .sidebar-content {
# margin-left: auto;
# margin-right: -20rem;
# }
@media (max-width: 991.98px) {
.sidebar .sidebar-content {
margin-left: auto;
}
}
</style>
""" % (ucloud_color, body_color)
st.markdown(header, unsafe_allow_html=True)
st.markdown(subheader, unsafe_allow_html=True)
st.markdown(body, unsafe_allow_html=True)
st.markdown(code, unsafe_allow_html=True)
st.markdown(sidebar, unsafe_allow_html=True)
##########################
### Plotting functions ###
##########################
def gen_line_chart(df, x_axis, y_axis):
"""
Generate line charts.
"""
cols = list(df.columns.values)
fig = px.line(df,
x=x_axis,
y=y_axis,
facet_col=cols[1],
color=cols[0],
color_discrete_map={'UCloud': ucloud_color,
'Boston Server': boston_color,
})
fig.update_xaxes(tickvals=[8, 16, 32, 64, 128, 256],
title_text="batch size",
linecolor='black',
type='log',
showgrid=True,
gridwidth=1,
gridcolor='LightGrey')
fig.update_yaxes(linecolor='black',
showgrid=True,
gridwidth=1,
gridcolor='LightGrey')
fig.update_layout({'paper_bgcolor': plt_bkg_color,
'plot_bgcolor': plt_bkg_color})
fig.update_traces(mode='lines+markers',
marker_symbol='hexagram',
marker_size=9)
return fig
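# Illustrative call (hypothetical dataframe mirroring the benchmark CSVs used below:
# first column = system name, second = facet column, then batch size and metrics):
#
#   df_demo = pd.DataFrame({'System': ['UCloud', 'Boston Server'],
#                           'GPUs': [4, 8],
#                           'Batch size': [32, 32],
#                           'Accuracy': [0.88, 0.89],
#                           'Time to train (s)': [410.0, 230.0]})
#   fig_demo = gen_line_chart(df_demo, 'Batch size', 'Time to train (s)')
#   # fig_demo.show()  # or st.plotly_chart(fig_demo) inside the app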
#############
### TITLE ###
#############
"""
# UCloud Report
## Deep Learning Benchmark Tests
"""
st.write("-------")
###################
### DESCRIPTION ###
###################
description = """
### Last update:
November 13, 2020
### Report:
2020-3
### Author:
<NAME>, Ph.D. (<<EMAIL>>)\n
Computational Scientist \n
Research Support Lead \n
SDU eScience
### Description:
The purpose of this study is to test the
performance of the DGX A100 server provided by [Boston Limited](https://www.boston.co.uk/default.aspx) for the distributed training of
deep learning models. The results are compared with equivalent simulations performed on the UCloud system.
In all the tests we train the same model for 3 epochs and we fix the data batch size on a single device.
We use three different datasets.
The reference literature for the model and the datasets is reported [here](https://openreview.net/pdf?id=rJ4km2R5t7).
The training process is executed in the following modes:
- single-precision floating arithmetic (FP32) on NVIDIA V100 GPUs
- native TensorFloat-32 (TF32) precison on NVIDIA A100 GPUs
- automatic mixed precision (AMP) on all devices
More information can be found in this blog [post](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/).
### Specs:
#### 1. UCloud
`u1-gpu-4` machine type:
- 4x NVIDIA Tesla V100 GPUs SXM2 32GB
- 64 CPU cores
- 180 GB of memory
#### 2. Boston Server
NVIDIA DGX A100:
- 8x NVIDIA Tesla A100 GPUs SXM4 40GB
- 6x NVIDIA NVSWITCHES
- 9x Mellanox ConnectX-6 200Gb/S Network Interface
- Dual 64-core AMD CPUs and 1TB System Memory
- 15TB Gen4 NVMe SSD
"""
#################
### SIDE MENU ###
#################
# logo = Image.open('figs/logo_esc.png')
# st.sidebar.image(logo, format='PNG', width=50)
st.sidebar.title("Benchmark Models")
radio = st.sidebar.radio(label="", options=["Description",
"Dataset 1",
"Dataset 2",
"Dataset 3",
])
if radio == "Dataset 1":
###############################
st.subheader("**Dataset 1**")
###############################
st.markdown("""
**Category:**
Text Classification
**Model:**
[GLUE](https://github.com/huggingface/transformers/tree/master/examples/text-classification)
**Dataset:**
The Microsoft Research Paraphrase Corpus (MRPC), with 3.7k sentence pairs
**Framework:**
PyTorch
""")
path_to_file = "training/PyTorch/TextClassification/MRPC/results.csv"
df1 = load_data(path_to_file)
cols1 = list(df1.columns.values)
if st.checkbox("Show dataset 1"):
st.table(df1)
dfp1 = df1.loc[:, [cols1[0], cols1[1], cols1[2], cols1[3], cols1[4]]]
dfp1 = dfp1.rename(columns={cols1[4]: 'Time to train (s)'})
dfp1 = dfp1.rename(columns={cols1[3]: 'Accuracy'})
dfp1['Training type'] = 'FP32/TF32'
dfp2 = df1.loc[:, [cols1[0], cols1[1], cols1[2], cols1[5], cols1[6]]]
dfp2 = dfp2.rename(columns={cols1[6]: 'Time to train (s)'})
dfp2 = dfp2.rename(columns={cols1[5]: 'Accuracy'})
dfp2['Training type'] = 'AMP'
dff = pd.concat([dfp1, dfp2])
cols = list(dff.columns.values)
train_type = st.selectbox("Select training type",
('FP32/TF32', 'AMP'))
dff_t = dff[dff['Training type'] == train_type].drop(columns='Training type')
cols_t = list(dff_t.columns.values)
# st.table(dff_t)
## Create line charts
fig1 = gen_line_chart(dff_t, cols_t[2], cols_t[4])
fig2 = gen_line_chart(dff_t, cols_t[2], cols_t[3])
st.plotly_chart(fig1)
st.plotly_chart(fig2)
elif radio == "Dataset 2":
###############################
st.subheader("**Dataset 2**")
###############################
st.markdown("""
**Category:**
Text Classification
**Model:**
[GLUE](https://github.com/huggingface/transformers/tree/master/examples/text-classification)
**Dataset:**
The Quora Question Pairs (QQP) collection, with 364k sentence pairs
**Framework:**
PyTorch
""")
path_to_file = "training/PyTorch/TextClassification/QQP/results.csv"
df2 = load_data(path_to_file)
cols2 = list(df2.columns.values)
if st.checkbox("Show dataset 2"):
st.table(df2)
dfp1 = df2.loc[:, [cols2[0], cols2[1], cols2[2], cols2[3], cols2[4]]]
dfp1 = dfp1.rename(columns={cols2[4]: 'Time to train (s)'})
dfp1 = dfp1.rename(columns={cols2[3]: 'Accuracy'})
dfp1['Training type'] = 'FP32/TF32'
dfp2 = df2.loc[:, [cols2[0], cols2[1], cols2[2], cols2[5], cols2[6]]]
dfp2 = dfp2.rename(columns={cols2[6]: 'Time to train (s)'})
dfp2 = dfp2.rename(columns={cols2[5]: 'Accuracy'})
dfp2['Training type'] = 'AMP'
dff = pd.concat([dfp1, dfp2])
cols = list(dff.columns.values)
train_type = st.selectbox("Select training type",
('FP32/TF32', 'AMP'))
dff_t = dff[dff['Training type'] == train_type].drop(columns='Training type')
cols_t = list(dff_t.columns.values)
## Create line charts
fig1 = gen_line_chart(dff_t, cols_t[2], cols_t[4])
fig2 = gen_line_chart(dff_t, cols_t[2], cols_t[3])
fig2.update_yaxes(range=[0.86, 0.9])
st.plotly_chart(fig1)
st.plotly_chart(fig2)
elif radio == "Dataset 3":
###############################
st.subheader("**Dataset 3**")
###############################
st.markdown("""
**Category:**
Text Classification
**Model:**
[GLUE](https://github.com/huggingface/transformers/tree/master/examples/text-classification)
**Dataset:**
The Multi-Genre Natural Language Inference (MNLI) corpus, with 393k sentence pairs
**Framework:**
PyTorch
""")
path_to_file = "training/PyTorch/TextClassification/MNLI/results.csv"
df3 = load_data(path_to_file)
cols3 = list(df3.columns.values)
if st.checkbox("Show dataset 3"):
st.table(df3)
dfp1 = df3.loc[:, [cols3[0], cols3[1], cols3[2], cols3[3], cols3[4]]]
dfp1 = dfp1.rename(columns={cols3[4]: 'Time to train (s)'})
dfp1 = dfp1.rename(columns={cols3[3]: 'Accuracy'})
dfp1['Training type'] = 'FP32/TF32'
dfp2 = df3.loc[:, [cols3[0], cols3[1], cols3[2], cols3[5], cols3[6]]]
dfp2 = dfp2.rename(columns={cols3[6]: 'Time to train (s)'})
dfp2 = dfp2.rename(columns={cols3[5]: 'Accuracy'})
dfp2['Training type'] = 'AMP'
    dff = pd.concat([dfp1, dfp2])
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05-orchestrator.ipynb (unless otherwise specified).
__all__ = ['retry_request', 'if_possible_parse_local_datetime', 'SP_and_date_request', 'handle_capping',
'date_range_request', 'year_request', 'construct_year_month_pairs', 'year_and_month_request',
'clean_year_week', 'construct_year_week_pairs', 'year_and_week_request', 'non_temporal_request',
'query_orchestrator']
# Cell
import pandas as pd
from tqdm import tqdm
from warnings import warn
from requests.models import Response
from . import utils, raw
# Cell
def retry_request(raw, method, kwargs, n_attempts=3):
attempts = 0
success = False
while (attempts < n_attempts) and (success == False):
try:
r = getattr(raw, method)(**kwargs)
utils.check_status(r)
success = True
except Exception as e:
attempts += 1
if attempts == n_attempts:
raise e
return r
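# Illustrative sketch (assumes the BMRS `raw` module exposes a method of this name
# and that `api_key` holds a valid key); on repeated failures the last exception
# is re-raised after `n_attempts` tries:
#
#   r = retry_request(raw, 'get_B1770',
#                     {'APIKey': api_key, 'SettlementDate': '2020-01-01',
#                      'Period': '1', 'ServiceType': 'xml'},
#                     n_attempts=3)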
def if_possible_parse_local_datetime(df):
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod', 'initialForecastPublishingPeriodCommencingTime', 'latestForecastPublishingPeriodCommencingTime', 'outTurnPublishingPeriodCommencingTime']
dt_cols = [col for col in df.columns if 'date' in col.lower() or col in dt_cols_with_period_in_name]
sp_cols = [col for col in df.columns if 'period' in col.lower() and col not in dt_cols_with_period_in_name]
if len(dt_cols)==1 and len(sp_cols)==1:
df = utils.parse_local_datetime(df, dt_col=dt_cols[0], SP_col=sp_cols[0])
return df
def SP_and_date_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
df = pd.DataFrame()
stream = '_'.join(method.split('_')[1:])
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
df_dates_SPs = utils.dt_rng_to_SPs(start_date, end_date)
date_SP_tuples = list(df_dates_SPs.reset_index().itertuples(index=False, name=None))[:-1]
for datetime, query_date, SP in tqdm(date_SP_tuples, desc=stream, total=len(date_SP_tuples)):
kwargs.update({
kwargs_map['date']: datetime.strftime('%Y-%m-%d'),
kwargs_map['SP']: SP,
})
missing_kwargs = list(set(func_params) - set(['SP', 'date'] + list(kwargs.keys())))
assert len(missing_kwargs) == 0, f"The following kwargs are missing: {', '.join(missing_kwargs)}"
r = retry_request(raw, method, kwargs, n_attempts=n_attempts)
df_SP = utils.parse_xml_response(r)
df = pd.concat([df, df_SP])
df = utils.expand_cols(df)
df = if_possible_parse_local_datetime(df)
return df
# Cell
def handle_capping(
r: Response,
df: pd.DataFrame,
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
end_date: str,
request_type: str,
**kwargs
):
capping_applied = utils.check_capping(r)
assert capping_applied != None, 'No information on whether or not capping limits had been breached could be found in the response metadata'
if capping_applied == True: # only subset of date range returned
dt_cols_with_period_in_name = ['startTimeOfHalfHrPeriod']
dt_cols = [col for col in df.columns if ('date' in col.lower() or col in dt_cols_with_period_in_name) and ('end' not in col.lower())]
if len(dt_cols) == 1:
start_date = pd.to_datetime(df[dt_cols[0]]).max().strftime('%Y-%m-%d')
if 'start_time' in kwargs.keys():
kwargs['start_time'] = '00:00'
if pd.to_datetime(start_date) >= pd.to_datetime(end_date):
                warn(f'The `end_date` ({end_date}) was not later than `start_date` ({start_date})\nThe `start_date` will be set one day earlier than the `end_date`.')
start_date = (pd.to_datetime(end_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
warn(f'Response was capped, request is rerunning for missing data from {start_date}')
df_rerun = date_range_request(
method=method,
kwargs_map=kwargs_map,
func_params=func_params,
api_key=api_key,
start_date=start_date,
end_date=end_date,
request_type=request_type,
**kwargs
)
df = pd.concat([df, df_rerun])
df = df.drop_duplicates()
else:
warn(f'Response was capped: a new `start_date` to continue requesting could not be determined automatically, please handle manually for `{method}`')
return df
def date_range_request(
method: str,
kwargs_map: dict,
func_params: list,
api_key: str,
start_date: str,
end_date: str,
request_type: str,
n_attempts: int=3,
**kwargs
):
assert start_date is not None, '`start_date` must be specified'
assert end_date is not None, '`end_date` must be specified'
kwargs.update({
'APIKey': api_key,
'ServiceType': 'xml'
})
for kwarg in ['start_time', 'end_time']:
if kwarg not in kwargs_map.keys():
kwargs_map[kwarg] = kwarg
kwargs[kwargs_map['start_date']], kwargs[kwargs_map['start_time']] = pd.to_datetime(start_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
    kwargs[kwargs_map['end_date']], kwargs[kwargs_map['end_time']] = pd.to_datetime(end_date).strftime('%Y-%m-%d %H:%M:%S').split(' ')
"""A collections of functions to facilitate
analysis of HiC data based on the cooler and cooltools
interfaces."""
import warnings
from typing import Tuple, Dict, Callable
import cooltools.expected
import cooltools.snipping
import pandas as pd
import bioframe
import cooler
import pairtools
import numpy as np
import multiprocess
from .snipping_lib import flexible_pileup
# define type aliases
CisTransPairs = Dict[str, pd.DataFrame]
PairsSamples = Dict[str, CisTransPairs]
# define functions
def get_expected(
clr: cooler.Cooler, arms: pd.DataFrame, proc: int = 20, ignore_diagonals: int = 2
) -> pd.DataFrame:
"""Takes a clr file handle and a pandas dataframe
with chromosomal arms (generated by getArmsHg19()) and calculates
the expected read number at a certain genomic distance.
The proc parameters defines how many processes should be used
to do the calculations. ingore_diags specifies how many diagonals
to ignore (0 mains the main diagonal, 1 means the main diagonal
and the flanking tow diagonals and so on)"""
with multiprocess.Pool(proc) as pool:
expected = cooltools.expected.diagsum(
clr,
tuple(arms.itertuples(index=False, name=None)),
transforms={"balanced": lambda p: p["count"] * p["weight1"] * p["weight2"]},
map=pool.map,
ignore_diags=ignore_diagonals,
)
# construct a single dataframe for all regions (arms)
expected_df = (
expected.groupby(["region", "diag"])
.aggregate({"n_valid": "sum", "count.sum": "sum", "balanced.sum": "sum"})
.reset_index()
)
# account for different number of valid bins in diagonals
expected_df["balanced.avg"] = expected_df["balanced.sum"] / expected_df["n_valid"]
return expected_df
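# Illustrative usage (hypothetical cooler URI; assumes a balanced multi-resolution
# .mcool file is available locally):
#
#   clr = cooler.Cooler("data/example.mcool::/resolutions/10000")
#   arms = get_arms_hg19()
#   expected_df = get_expected(clr, arms, proc=4, ignore_diagonals=2)
#   expected_df.head()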
def get_arms_hg19() -> pd.DataFrame:
"""Downloads the coordinates for chromosomal arms of the
genome assembly hg19 and returns it as a dataframe."""
# download chromosomal sizes
chromsizes = bioframe.fetch_chromsizes("hg19")
    # download centromeres
centromeres = bioframe.fetch_centromeres("hg19")
centromeres.set_index("chrom", inplace=True)
centromeres = centromeres.mid
# define chromosomes that are well defined (filter out unassigned contigs)
good_chroms = list(chromsizes.index[:23])
    # construct arm regions (for each chromosome, from 0 to the centromere and from the centromere to the end)
arms = [
arm
for chrom in good_chroms
for arm in (
(chrom, 0, centromeres.get(chrom, 0)),
(chrom, centromeres.get(chrom, 0), chromsizes.get(chrom, 0)),
)
]
# construct dataframe out of arms
arms = pd.DataFrame(arms, columns=["chrom", "start", "end"])
return arms
def _assign_supports(features, supports):
"""assigns supports to entries in snipping windows.
Workaround for bug in cooltools 0.2.0 that duplicate
supports are not handled correctly. Copied from cooltools.common.assign_regions"""
index_name = features.index.name # Store the name of index
features = (
features.copy().reset_index()
) # Store the original features' order as a column with original index
if "chrom" in features.columns:
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=["chrom", "start", "end"],
cols2=["chrom", "start", "end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
"chrom_1",
"start_1",
"end_1",
] # To filter out duplicates later
overlap["overlap_length"] = overlap["overlap_end"] - overlap["overlap_start"]
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values("overlap_length", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features["region"] = overlap["name_2"]
if "chrom1" in features.columns:
for idx in ("1", "2"):
overlap = bioframe.overlap(
features,
supports,
how="left",
cols1=[f"chrom{idx}", f"start{idx}", f"end{idx}"],
cols2=[f"chrom", f"start", f"end"],
keep_order=True,
return_overlap=True,
)
overlap_columns = [
"index_1",
f"chrom{idx}_1",
f"start{idx}_1",
f"end{idx}_1",
] # To filter out duplicates later
overlap[f"overlap_length{idx}"] = (
overlap[f"overlap_end{idx}"] - overlap[f"overlap_start{idx}"]
)
# Filter out overlaps with multiple regions:
overlap = (
overlap.sort_values(f"overlap_length{idx}", ascending=False)
.drop_duplicates(overlap_columns, keep="first")
.sort_index()
).reset_index(drop=True)
# Copy single column with overlapping region name:
features[f"region{idx}"] = overlap["name_2"]
# Form a single column with region names where region1 == region2, and np.nan in other cases:
features["region"] = np.where(
features["region1"] == features["region2"], features["region1"], np.nan
)
features = features.drop(
["region1", "region2"], axis=1
) # Remove unnecessary columns
features = features.set_index(
index_name if not index_name is None else "index"
) # Restore the original index
features.index.name = index_name # Restore original index title
return features
def assign_regions(
window: int,
binsize: int,
chroms: pd.Series,
positions: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
of the HiC bins. The positions which represent the center of the regions
is givin the the chroms series and the positions series."""
# construct windows from the passed chromosomes and positions
snipping_windows = cooltools.snipping.make_bin_aligned_windows(
binsize, chroms.values, positions.values, window
)
# assign chromosomal arm to each position
snipping_windows = _assign_supports(snipping_windows, bioframe.parse_regions(arms))
return snipping_windows
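# Illustrative usage (hypothetical site list; window and binsize are in base pairs,
# and the window should be a multiple of the binsize):
#
#   sites = pd.DataFrame({"chrom": ["chr1", "chr2"], "pos": [1_000_000, 2_500_000]})
#   arms = get_arms_hg19()
#   snipping_windows = assign_regions(window=200_000, binsize=10_000,
#                                     chroms=sites["chrom"], positions=sites["pos"],
#                                     arms=arms)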
def assign_regions_2d(
window: int,
binsize: int,
chroms1: pd.Series,
positions1: pd.Series,
chroms2: pd.Series,
positions2: pd.Series,
arms: pd.DataFrame,
) -> pd.DataFrame:
"""Constructs a 2d region around a series of chromosomal location pairs.
Window specifies the windowsize for the constructed regions. The total region
assigned will be pos-window until pos+window. The binsize specifies the size
    of the HiC bins. The positions that represent the centers of the regions
    are given by the chroms1 and chroms2 series as well as the
positions1 and positions2 series."""
# construct windows from the passed chromosomes 1 and positions 1
windows1 = assign_regions(window, binsize, chroms1, positions1, arms)
windows1.columns = [str(i) + "1" for i in windows1.columns]
# construct windows from the passed chromosomes 1 and positions 1
windows2 = assign_regions(window, binsize, chroms2, positions2, arms)
windows2.columns = [str(i) + "2" for i in windows2.columns]
    # concatenate windows
    windows = pd.concat((windows1, windows2), axis=1)
# filter for mapping to different regions
windows_final = windows.loc[windows["region1"] == windows["region2"], :]
# subset data and rename regions
windows_small = windows_final[
["chrom1", "start1", "end1", "chrom2", "start2", "end2", "region1"]
]
windows_small.columns = [
"chrom1",
"start1",
"end1",
"chrom2",
"start2",
"end2",
"region",
]
return windows_small
def do_pileup_obs_exp(
clr: cooler.Cooler,
expected_df: pd.DataFrame,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle, an expected dataframe
constructed by getExpected, snipping windows constructed
by assignRegions and performs a pileup on all these regions
based on the obs/exp value. Returns a numpy array
that contains averages of all selected regions.
The collapse parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
region_frame = get_regions_from_snipping_windows(expected_df)
oe_snipper = cooltools.snipping.ObsExpSnipper(
clr, expected_df, regions=bioframe.parse_regions(region_frame)
)
# set warnings filter to ignore RuntimeWarnings since cooltools
# does not check whether there are inf or 0 values in
# the expected dataframe
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
with multiprocess.Pool(proc) as pool:
# extract a matrix of obs/exp average values for each snipping_window
oe_pile = cooltools.snipping.pileup(
snipping_windows, oe_snipper.select, oe_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile = np.nanmean(oe_pile[:, :, :], axis=2)
return collapsed_pile
return oe_pile
def do_pileup_iccf(
clr: cooler.Cooler,
snipping_windows: pd.DataFrame,
proc: int = 5,
collapse: bool = True,
) -> np.ndarray:
"""Takes a cooler file handle and snipping windows constructed
    by assign_regions and performs a pileup on all these regions
based on the corrected HiC counts. Returns a numpy array
that contains averages of all selected regions. The collapse
parameter specifies whether to return
the average window over all piles (collapse=True), or the individual
windows (collapse=False)."""
# get regions from snipping windows
region_frame = get_regions_from_snipping_windows(snipping_windows)
iccf_snipper = cooltools.snipping.CoolerSnipper(
clr, regions=bioframe.parse_regions(region_frame)
)
with multiprocess.Pool(proc) as pool:
iccf_pile = cooltools.snipping.pileup(
snipping_windows, iccf_snipper.select, iccf_snipper.snip, map=pool.map
)
if collapse:
# calculate the average of all windows
collapsed_pile_plus = np.nanmean(iccf_pile[:, :, :], axis=2)
return collapsed_pile_plus
return iccf_pile
def sliding_diamond(
array: np.ndarray, side_len: int = 6, center_x: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Will slide a diamond of side length 'sideLen'
down the diagonal of the passed array and return
the average values for each position and
the relative position of each value with respect
to the center of the array (in Bin units)"""
# initialize accumulators for diamond value and x-position
diamond_accumulator = list()
bin_accumulator = list()
if side_len % 2 == 0:
half_window = side_len
for i in range(0, (array.shape[0] - half_window + 1)):
# extract diamond
diamond_array = array[i : (i + half_window), i : (i + half_window)]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i,
(i + half_window),
)
)
)
else:
half_window = side_len // 2
for i in range(half_window, (array.shape[0] - half_window)):
# extract diamond
diamond_array = array[
i - half_window : (i + half_window) + 1,
i - half_window : (i + half_window) + 1,
]
# set inf to nan for calculation of mean
diamond_array[np.isinf(diamond_array)] = np.nan
diamond_accumulator.append(np.nanmean(diamond_array))
# append x-value for this particular bin
bin_accumulator.append(
np.median(
range(
i - half_window,
(i + half_window) + 1,
)
)
)
if center_x:
x_out = np.array(bin_accumulator - np.median(bin_accumulator))
else:
x_out = np.array(bin_accumulator)
return (x_out, np.array(diamond_accumulator))
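# Minimal worked example (4x4 array, side_len=2): the diamond slides along the main
# diagonal, averaging each 2x2 block, and the x positions are re-centred on 0:
#
#   arr = np.arange(16, dtype=float).reshape(4, 4)
#   x, y = sliding_diamond(arr, side_len=2, center_x=True)
#   # x -> array([-1.,  0.,  1.]);  y -> array([ 2.5,  7.5, 12.5])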
def load_pairs(path: str) -> pd.DataFrame:
"""Function to load a .pairs or .pairsam file
into a pandas dataframe.
This only works for relatively small files!"""
# get handels for header and pairs_body
header, pairs_body = pairtools._headerops.get_header(
pairtools._fileio.auto_open(path, "r")
)
# extract column names from header
cols = pairtools._headerops.extract_column_names(header)
# read data into dataframe
frame = pd.read_csv(pairs_body, sep="\t", names=cols)
return frame
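# Illustrative usage (hypothetical path; only suitable for small files because the
# whole pairs body is materialised in memory):
#
#   pairs_df = load_pairs("data/sample.pairs.gz")
#   pairs_df[["chrom1", "pos1", "chrom2", "pos2"]].head()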
def down_sample_pairs(
sample_dict: PairsSamples, distance: int = 10 ** 4
) -> PairsSamples:
"""Will downsample cis and trans reads in sampleDict to contain
as many combined cis and trans reads as the sample with the lowest readnumber of the
specified distance."""
# initialize output dictionary
out_dict = {sample: {} for sample in sample_dict}
for sample in sample_dict.keys():
# create temporary dataframes
cis_temp = sample_dict[sample]["cis"]
cis_temp["rType"] = "cis"
trans_temp = sample_dict[sample]["trans"]
trans_temp["rType"] = "trans"
# concatenate them and store in outdict
out_dict[sample]["all"] = | pd.concat((cis_temp, trans_temp)) | pandas.concat |
import os
import argparse
from configparser import ConfigParser
import time
import sys
import logging
import shutil
import pandas as pd
import numpy as np
import Metrics
parser = argparse.ArgumentParser()
parser.add_argument('--seq_len', type=int, default=6, help='sequence length of values, which should be even nums (2,4,6,12)')
parser.add_argument('--his_len', type=int, default=6, help='sequence length of observed historical values')
parser.add_argument('--month', type=str, default='202111', help='which experiment setting (month) to run')
parser.add_argument('--city', type=str, default='tokyo', help='which experiment setting (city) to run')
parser.add_argument('--channelin', type=int, default=1, help='number of input channel')
parser.add_argument('--channelout', type=int, default=1, help='number of output channel')
opt = parser.parse_args()
config = ConfigParser()
config.read('params.txt', encoding='UTF-8')
train_month = eval(config[opt.month]['train_month'])
test_month = eval(config[opt.month]['test_month'])
traffic_path = config[opt.month]['traffic_path']
subroad_path = config[opt.city]['subroad_path']
road_path = config['common']['road_path']
adj_path = config['common']['adjdis_path']
N_link = config.getint('common', 'N_link')
feature_list = ['speed_typea']
opt.channelin = len(feature_list)
_, filename = os.path.split(os.path.abspath(sys.argv[0]))
filename = os.path.splitext(filename)[0]
model_name = filename.split('_')[-1]
timestring = time.strftime('%Y%m%d%H%M%S', time.localtime())
path = f'./save/{opt.city}{opt.month}_{model_name}_c{opt.channelin}to{opt.channelout}_{timestring}'
logging_path = f'{path}/{model_name}_{timestring}_logging.txt'
score_path = f'{path}/{model_name}_{timestring}_scores.txt'
if not os.path.exists(path): os.makedirs(path)
shutil.copy2(sys.argv[0], path)
shutil.copy2(f'{model_name}.py', path)
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
class MyFormatter(logging.Formatter):
def format(self, record):
spliter = ' '
record.msg = str(record.msg) + spliter + spliter.join(map(str, record.args))
record.args = tuple() # set empty to args
return super().format(record)
formatter = MyFormatter()
handler = logging.FileHandler(logging_path, mode='a')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(handler)
logger.addHandler(console)
logger.info('experiment_city', opt.city)
logger.info('experiment_month', opt.month)
logger.info('model_name', model_name)
logger.info('channel_in', opt.channelin)
logger.info('channel_out', opt.channelout)
def get_seq_data(data, seq_len):
seq_data = [data[i:i+seq_len, ...] for i in range(0, data.shape[0]-seq_len+1)]
return np.array(seq_data)
def getXSYS(data, his_len, seq_len):
seq_data = get_seq_data(data, seq_len + his_len)
XS, YS = seq_data[:, :his_len, ...], seq_data[:, -seq_len:, ...]
return XS, YS
def MonthlyAverage():
df_train = pd.concat([pd.read_csv(config[month]['traffic_path']) for month in train_month])
df_train.loc[df_train['speed_typea']<0, 'speed_typea'] = 0
df_train.loc[df_train['speed_typea']>200, 'speed_typea'] = 100
df_train['gps_timestamp'] = pd.to_datetime(df_train['gps_timestamp'])
df_train['weekdaytime'] = df_train['gps_timestamp'].dt.weekday * 144 + (df_train['gps_timestamp'].dt.hour * 60 + df_train['gps_timestamp'].dt.minute)//10
df_train = df_train[['linkid', 'weekdaytime', 'speed_typea']]
df_train_avg = df_train.groupby(['linkid', 'weekdaytime']).mean().reset_index()
df_test = pd.concat([pd.read_csv(config[month]['traffic_path']) for month in test_month])
df_test.loc[df_test['speed_typea']<0, 'speed_typea'] = 0
df_test.loc[df_test['speed_typea']>200, 'speed_typea'] = 100
df_test['gps_timestamp'] = pd.to_datetime(df_test['gps_timestamp'])
df_test['weekdaytime'] = df_test['gps_timestamp'].dt.weekday * 144 + (df_test['gps_timestamp'].dt.hour * 60 + df_test['gps_timestamp'].dt.minute)//10
df_test = df_test[['linkid', 'gps_timestamp', 'speed_typea', 'weekdaytime']]
df = pd.merge(df_test, df_train_avg, on=['linkid', 'weekdaytime'])
    df_capital_link = pd.read_csv(road_path)
#!/usr/bin/env python
# coding: utf-8
# In[15]:
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import datetime
import matplotlib.patches as mpatches
from datetime import datetime
# ## Exploratory data analysis
#
# Aims for the project:
# 1. Improve referral process for connecting rough sleepers to service providers and users
# 2. Better identification of duplicate notifications
# 3. understand trends over time and space
#
# In[3]:
#read file
path = 'xxxxx'
file = pd.read_csv(path, sep = ';', error_bad_lines = False)
# ### Challenge for Streetlink
#
# #### Number of alerts in various region (London, coastline)
# In[4]:
# scatter plot of longitude and latitude with alpha controlling weight/density
get_ipython().run_line_magic('matplotlib', 'inline')
file.plot(kind = 'scatter', x = 'geo_location_longitude', y = 'geo_location_latitude', alpha = .05, figsize = (10,7))
plt.legend()
# In[10]:
#region
region = pd.DataFrame(file.groupby(by = 'region')['Alert ID'].count())
region['pct'] = region['Alert ID']/sum(region['Alert ID']) * 100
print(region)
# ### On an average day in London, Streetlink receive....on a peak day, they receive...
# In[20]:
file['alert_open_date_formulaT'] = pd.to_datetime(file['alert_open_date_formula'])
# coding: utf-8
# Copyright (c) 2021 AkaiKKRteam.
# Distributed under the terms of the Apache License, Version 2.0.
from copy import deepcopy
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error
_SUCCESS_ = "O"
_FAILED_ = "X"
_NO_REF_ = "-"
_FILL_STR_ = "-"
_PAD_STR_ = " "
_CURRENT_ = "current"
_REFERENCE_ = "reference"
_UPDN_LIST_ = ["up", "dn"]
_UNNUMBERED_TARGET_ = ["te", "tm", "Tc", "resis"]
_NUMBERED_TARGET_ = ["spinlocalmoment", "orbitallocalmoment",
"cnd"]
def show_chk_legend():
print("{}: passed. {}: failed, {}: no reference".format(
_SUCCESS_, _FAILED_, _NO_REF_))
print()
def chk_dic_success_all(chk_dic):
chk_array = np.array([v == _SUCCESS_ for v in chk_dic.values()])
return chk_array
def _go_fix_localmoment(result, ref, shortname=False):
"""ref can be None"""
result = deepcopy(result)
ref = deepcopy(ref)
df1 = pd.DataFrame([[x for x in result.values()]], index=[_CURRENT_],
columns=result.keys())
if ref is not None:
df2 = pd.DataFrame([[x for x in result.values()]], index=[_REFERENCE_],
columns=result.keys())
else:
df2 = None
return df1, df2
if True:
for obj in [result, ref]:
if obj is None:
continue
for label in _NUMBERED_TARGET_:
if label in obj:
v = obj[label]
if isinstance(v, list):
del obj[label]
for i, r in enumerate(v):
name = "{}{}".format(label, i+1)
obj[name] = r
result.update({"target": _CURRENT_})
if ref is not None:
ref.update({"target": _REFERENCE_})
df1 = pd.DataFrame(result, dtype=object, index=[0])
try:
df2 = pd.DataFrame(ref, dtype=object)
except ValueError:
df2 = pd.DataFrame(ref, dtype=object, index=[0])
# replace long name with short name
if shortname:
for df in [df1, df2]:
_make_df_shortname(df)
return df1, df2
class ThresBase:
def __init__(self, thres):
self.thres = thres
thres_key_type = []
thres_key_name = []
for thres_key, thres_value in thres.items():
s = thres_key.split("_")
thres_key_type.append(deepcopy(s[0]))
thres_key_name.append(deepcopy(s[1]))
thres_key_type = list(set(thres_key_type))
thres_key_name = list(set(thres_key_name))
thres_key_type.sort()
thres_key_name.sort()
self.thres_key_type = thres_key_type
self.thres_key_name = thres_key_name
@property
def key_type(self):
return self.thres_key_type
@property
def key_name(self):
return self.thres_key_name
def _make_df_diff(key, result, ref, thres):
df1, df2 = _go_fix_localmoment(result, ref)
if ref is None:
return df1
df = pd.concat([df1, df2])
res = {}
thresbase = ThresBase(thres)
for thres_key_type in thresbase.key_type:
res[thres_key_type] = {}
thres_key_type_th = thres_key_type+"_th"
res[thres_key_type_th] = {}
for thres_key, thres_value in thres.items():
s = thres_key.split("_")
thres_key_type = deepcopy(s[0])
thres_key_name = deepcopy(s[1])
thres_key_type_th = thres_key_type+"_th"
for name in df.columns:
if thres_key_name == name:
v_current = df.loc[_CURRENT_, name]
if isinstance(v_current, list):
v_current = np.array(v_current)
v_reference = df.loc[_REFERENCE_, name]
if isinstance(v_reference, list):
v_reference = np.array(v_reference)
if thres_key_type == "diff":
value = np.abs(v_current - v_reference)
res[thres_key_type].update({name: value})
res[thres_key_type_th].update({name: thres_value})
elif thres_key_type == "rdiff":
value = np.abs(v_current - v_reference) /\
np.abs(v_reference)
res[thres_key_type].update({name: value})
res[thres_key_type_th].update({name: thres_value})
elif thres_key_type == "and":
value = v_current and v_reference
res[thres_key_type].update({name: value})
res[thres_key_type_th].update({name: thres_value})
else:
print("unkown type", type)
print("thres", thres)
raise ValueError
df_list = []
for res_key, res_value in res.items():
_df = pd.DataFrame(
[[v for v in res_value.values()]], columns=res_value.keys(), index=[res_key])
df_list.append(_df)
df_comp = pd.concat(df_list, axis=0)
df_all = pd.concat([df, df_comp], axis=0)
return df_all
def _make_df_shortname(df):
return df
df = df.copy()
col2 = []
col = list(df.columns)
for s in col:
for lmtarget, abbrev in zip(["spinlocalmoment", "orbitallocalmoment", "threads"],
["sl", "ol", "thrd"]):
if lmtarget in s:
s = s.replace(lmtarget, abbrev)
col2.append(s)
df.columns = col2
return df
def __df_str_add_spc(df, pad=_PAD_STR_):
lines = []
for x in df.__str__().splitlines():
lines.append(pad+x)
return "\n".join(lines)
class DiffResult:
def __init__(self, key, result, ref, thres):
self.key = key
self.result = result
self.ref = ref
self.thres = thres
df = _make_df_diff(key, result, ref, thres)
self.df = df
def process(self, ):
df = self.df
thres = self.thres
if _REFERENCE_ not in df.index:
return df, None
res_dic = {}
for thres_op_label in thres.keys():
s = thres_op_label.split("_")
op = s[0]
label = s[1]
op_th = op+"_th"
value = df.loc[op, label]
if isinstance(value, list):
value = np.array(value)
thvalue = df.loc[op_th, label]
if op == "and":
flag = value == thvalue
else:
flag = value < thvalue
if isinstance(flag, np.ndarray):
flag = np.all(flag == True)
res_dic[thres_op_label] = flag
df_chk = pd.DataFrame(res_dic, index=["chk"])
return df, df_chk
class DiffVector:
def __init__(self, df, thres):
self.df = df
self.thres = thres
self. difflabel = "diff"
def process(self):
"""make dataframe
"""
df = self.df
difflabel = self.difflabel
v1 = df.loc[_CURRENT_, :].astype(float).values
v2 = df.loc[_REFERENCE_, :].astype(float).values
diffvalue = np.abs(v1-v2).tolist()
_df = pd.DataFrame([v1.tolist(), v2.tolist(), diffvalue], columns=df.columns,
index=[_CURRENT_, _REFERENCE_, difflabel])
self.df = _df # update self.df
return _df
def evaluate_distance(self):
_df = self.df
difflabel = self.difflabel
thres = self.thres
diff_dic = {}
for type_ in thres.keys():
if type_ == "diff_max":
value = np.max(_df.loc[difflabel, :].values)
elif type_ == "mae":
value = mean_absolute_error(_df.loc[_CURRENT_, :].values,
_df.loc[_REFERENCE_, :].values)
else:
print("unknown thres.keys()", thres.keys())
raise ValueError
diff_dic.update({type_: value})
df_diff = pd.DataFrame(
diff_dic, dtype='object', index=["value"])
th = {}
for label in ["diff_max", "mae"]:
th[label] = thres[label]
df_th = pd.DataFrame(th, index=["thres"])
df_diff = pd.concat([df_diff, df_th], axis=0)
chk = {}
for diffkey in ["diff_max", "mae"]:
chk[diffkey] = df_diff.loc["value",
diffkey] < df_diff.loc["thres", diffkey]
df_chk = pd.DataFrame(chk, index=["chk"])
df_diff = pd.concat([df_diff, df_chk], axis=0)
return df_diff
def _sort_types_inside_row(df):
"""exchange type1 and type2 as type1<type2 to compare with another data.
and add pair column.
"""
df = df.copy()
type1 = df["type1"].values
type2 = df["type2"].values
t1t2_list = []
for t1, t2 in zip(type1, type2):
_t1t2 = [t1, t2]
_t1t2.sort()
t1t2_list.append(_t1t2)
df[["type1", "type2"]] = t1t2_list
if "pair" in list(df.columns):
del df["pair"]
# add pair column
comp1 = df["comp1"].values
comp2 = df["comp2"].values
type1 = df["type1"].values
type2 = df["type2"].values
typepair = []
for t1, t2, c1, c2 in zip(type1, type2, comp1, comp2):
typepair.append("-".join([t1, t2, c1, c2]))
df_pair = pd.DataFrame({"pair": typepair})
jijdf = pd.concat([df, df_pair], axis=1)
return jijdf
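# Minimal sketch of what this helper normalises (hypothetical Jij rows): type1/type2
# are put into sorted order per row and the 'pair' key is rebuilt from types and
# components:
#
#   demo = pd.DataFrame({"type1": ["Fe2", "Fe1"], "type2": ["Fe1", "Fe1"],
#                        "comp1": ["1", "1"], "comp2": ["2", "1"]})
#   _sort_types_inside_row(demo)["pair"].tolist()
#   # -> ['Fe1-Fe2-1-2', 'Fe1-Fe1-1-1']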
def _make_jij_dataframe(result_jij, ref_jij, target="J_ij(meV)"):
# make jij dataframe
df_result_jij = pd.DataFrame(result_jij[1:], columns=result_jij[0])
df_result_jij = df_result_jij[[
"comp1", "comp2", "J_ij", "J_ij(meV)", "pair"]]
if ref_jij is not None:
df_ref_jij = pd.DataFrame(ref_jij[1:], columns=ref_jij[0])
df_ref_jij = _sort_types_inside_row(df_ref_jij)
# sort values to compare the result with another dataframe.
df_ref_jij.sort_values(by="distance", inplace=True)
df_ref_jij = df_ref_jij[[
"comp1", "comp2", "J_ij", "J_ij(meV)", "pair"]]
df_ref_jij.reset_index(drop=True)
zipped_verson_df_list = zip([_CURRENT_, _REFERENCE_], [
df_result_jij, df_ref_jij])
else:
zipped_verson_df_list = zip([_CURRENT_], [df_result_jij])
df_ref_jij = None
# replace J_ij* with {_CURRENT_}_J_ij*.
# replace distace with {_CURRENT_}_distance.
for version, _df in zipped_verson_df_list:
col2 = {}
for col in _df.columns:
if col.startswith("J_ij"):
col2[col] = "{}_{}".format(version, col)
if col.startswith("distance"):
col2[col] = "{}_{}".format(version, col)
_df.rename(columns=col2, inplace=True)
if True:
print()
print("debug df_result_jij")
print(df_result_jij)
print("debug df_ref_jij")
print(df_ref_jij)
print()
if ref_jij is not None:
# merge
col = ["comp1", "comp2", 'pair']
df = df_result_jij.merge(df_ref_jij, on=col)
else:
df = df_result_jij
# add pair-comp1-comp2 field
paircomp = []
for comp1, comp2, pair in zip(df["comp1"], df["comp2"], df["pair"]):
paircomp.append("{}_{}_{}".format(pair, comp1, comp2))
df["typecomp"] = paircomp
if "{}_{}".format(_REFERENCE_, target) in df.columns:
col = ["{}_{}".format(_CURRENT_, target), "{}_{}".format(
_REFERENCE_, target), "typecomp"]
else:
col = ["{}_{}".format(_CURRENT_, target), "typecomp"]
_df = df[col].set_index("typecomp").T
# rename index
col2 = {}
for x in col:
s = x.split("_")
col2[x] = s[0]
_df = _df.rename(index=col2)
return _df
def _spactra_df(result_totaldos, ref_totaldos,
updn_list=_UPDN_LIST_, updn_label_list=_UPDN_LIST_):
if ref_totaldos is not None:
zipped_version_dos = zip([_CURRENT_, _REFERENCE_],
[result_totaldos, ref_totaldos])
else:
zipped_version_dos = zip([_CURRENT_, ],
[result_totaldos, ])
for version, obj in zipped_version_dos:
for updn, updnlabel in zip(updn_list, updn_label_list):
if updnlabel in obj:
dos = obj[updnlabel]
if len(dos) > 0:
name = "{}_{}".format(version, updn)
obj[name] = obj.pop(updnlabel)
else:
del obj[updn]
df_result_totaldos = pd.DataFrame(result_totaldos)
if ref_totaldos is not None:
df_ref_totaldos = pd.DataFrame(ref_totaldos)
if "energy" in df_ref_totaldos:
# confirm that their eneriges are the same
energy_result = df_result_totaldos.loc[:, "energy"].values
energy_ref = df_ref_totaldos.loc[:, "energy"].values
if np.all(energy_result == energy_ref):
df = df_result_totaldos.merge(df_ref_totaldos, on="energy")
else:
print(
"energies are different between the current calculation and reference.")
print("no check applied.")
df = df_result_totaldos
else:
print("no energy in reference data. but continue.")
            df = pd.concat([df_result_totaldos, df_ref_totaldos], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 19:18:18 2020
@author: <NAME>
"""
import pandas as pd
import numpy as np
import itertools
from operator import itemgetter
try:
from support_modules import role_discovery as rl
except:
import os
from importlib import util
spec = util.spec_from_file_location(
'role_discovery',
os.path.join(os.getcwd(), 'support_modules', 'role_discovery.py'))
rl = util.module_from_spec(spec)
spec.loader.exec_module(rl)
class FeaturesMannager():
def __init__(self, params):
"""constructor"""
self.model_type = params['model_type']
self.one_timestamp = params['one_timestamp']
# self.resources = pd.DataFrame
self.norm_method = params['norm_method']
self._scalers = dict()
self.scale_dispatcher = {'basic': self._scale_base,
'inter': self._scale_inter}
def calculate(self, log, add_cols):
log = self.add_calculated_times(log)
log = self.filter_features(log, add_cols)
return self.scale_features(log, add_cols)
@staticmethod
def add_resources(log, rp_sim):
# Resource pool discovery
res_analyzer = rl.ResourcePoolAnalyser(log, sim_threshold=rp_sim)
# Role discovery
resources = pd.DataFrame.from_records(res_analyzer.resource_table)
resources = resources.rename(index=str,
columns={"resource": "user"})
# Add roles information
log = log.merge(resources, on='user', how='left')
log = log[~log.task.isin(['Start', 'End'])]
log = log.reset_index(drop=True)
return log
def filter_features(self, log, add_cols):
# Add intercase features
columns = ['caseid', 'task', 'user', 'end_timestamp',
'role', 'dur', 'ac_index', 'rl_index']
if not self.one_timestamp:
columns.extend(['start_timestamp', 'wait'])
columns.extend(add_cols)
log = log[columns]
return log
def add_calculated_times(self, log):
"""Appends the indexes and relative time to the dataframe.
parms:
log: dataframe.
Returns:
Dataframe: The dataframe with the calculated features added.
"""
log['dur'] = 0
log['acc_cycle'] = 0
log['daytime'] = 0
log = log.to_dict('records')
log = sorted(log, key=lambda x: x['caseid'])
for _, group in itertools.groupby(log, key=lambda x: x['caseid']):
events = list(group)
ordk = 'end_timestamp' if self.one_timestamp else 'start_timestamp'
events = sorted(events, key=itemgetter(ordk))
for i in range(0, len(events)):
# In one-timestamp approach the first activity of the trace
                # is taken as instantaneous, since there is no previous timestamp
# to find a range
if self.one_timestamp:
if i == 0:
dur = 0
acc = 0
else:
dur = (events[i]['end_timestamp'] -
events[i-1]['end_timestamp']).total_seconds()
acc = (events[i]['end_timestamp'] -
events[0]['end_timestamp']).total_seconds()
else:
dur = (events[i]['end_timestamp'] -
events[i]['start_timestamp']).total_seconds()
acc = (events[i]['end_timestamp'] -
events[0]['start_timestamp']).total_seconds()
if i == 0:
wit = 0
else:
wit = (events[i]['start_timestamp'] -
events[i-1]['end_timestamp']).total_seconds()
events[i]['wait'] = wit if wit >= 0 else 0
events[i]['dur'] = dur
events[i]['acc_cycle'] = acc
time = events[i][ordk].time()
time = time.second + time.minute*60 + time.hour*3600
events[i]['daytime'] = time
events[i]['weekday'] = events[i]['start_timestamp'].weekday()
        return pd.DataFrame.from_dict(log)
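# --- Added illustration (editor's sketch, not part of the original module) ---
# The duration/wait/cycle arithmetic used in add_calculated_times, for two
# consecutive events of the same case (timestamps are made-up values):
_e0 = {'start_timestamp': pd.Timestamp('2020-01-01 09:00'),
       'end_timestamp': pd.Timestamp('2020-01-01 09:30')}
_e1 = {'start_timestamp': pd.Timestamp('2020-01-01 09:40'),
       'end_timestamp': pd.Timestamp('2020-01-01 10:00')}
_dur = (_e1['end_timestamp'] - _e1['start_timestamp']).total_seconds()        # 1200.0
_wait = (_e1['start_timestamp'] - _e0['end_timestamp']).total_seconds()       # 600.0
_acc_cycle = (_e1['end_timestamp'] - _e0['start_timestamp']).total_seconds()  # 3600.0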
from datetime import datetime
from functools import lru_cache
from typing import Union, Callable, Tuple
import dateparser
import pandas as pd
from dateutil.relativedelta import relativedelta
from numpy.distutils.misc_util import as_list
from wetterdienst.dwd.metadata import Parameter, TimeResolution, PeriodType
from wetterdienst.dwd.metadata.column_names import (
DWDMetaColumns,
DWDOrigDataColumns,
DWDDataColumns,
)
from wetterdienst.dwd.metadata.column_types import (
DATE_FIELDS_REGULAR,
DATE_FIELDS_IRREGULAR,
QUALITY_FIELDS,
INTEGER_FIELDS,
STRING_FIELDS,
)
from wetterdienst.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.dwd.metadata.parameter import TIME_RESOLUTION_PARAMETER_MAPPING
from wetterdienst.dwd.metadata.time_resolution import (
TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING,
)
from wetterdienst.exceptions import InvalidParameter
def check_parameters(
parameter: Parameter, time_resolution: TimeResolution, period_type: PeriodType
) -> bool:
"""
Function to check for element (alternative name) and if existing return it
Differs from foldername e.g. air_temperature -> tu
"""
check = TIME_RESOLUTION_PARAMETER_MAPPING.get(time_resolution, {}).get(
parameter, []
)
if period_type not in check:
return False
return True
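# --- Added usage note (editor's example; the specific enum members below are
# assumptions, not taken from this excerpt) ---
# check_parameters(Parameter.PRECIPITATION, TimeResolution.HOURLY, PeriodType.HISTORICAL)
# returns True only if that combination is listed in TIME_RESOLUTION_PARAMETER_MAPPING.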
def coerce_field_types(
df: pd.DataFrame, time_resolution: TimeResolution
) -> pd.DataFrame:
"""
A function used to create a unique dtype mapping for a given list of column names.
This function is needed as we want to ensure the expected dtypes of the returned
DataFrame as well as for mapping data after reading it from a stored .h5 file. This
is required as we want to store the data in this file with the same format which is
a string, thus after reading data back in the dtypes have to be matched.
Args:
df: the station_data gathered in a pandas.DataFrame
time_resolution: time resolution of the data as enumeration
Return:
station data with converted dtypes
"""
for column in df.columns:
# Station ids are handled separately as they are expected to not have any nans
if column == DWDMetaColumns.STATION_ID.value:
df[column] = df[column].astype(int)
elif column in DATE_FIELDS_REGULAR:
df[column] = pd.to_datetime(
df[column],
format=TIME_RESOLUTION_TO_DATETIME_FORMAT_MAPPING[time_resolution],
)
elif column in DATE_FIELDS_IRREGULAR:
df[column] = pd.to_datetime(
df[column], format=DatetimeFormat.YMDH_COLUMN_M.value
)
elif column in QUALITY_FIELDS or column in INTEGER_FIELDS:
            df[column] = pd.to_numeric(df[column], errors="coerce")
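# --- Added note (editor's sketch, not part of the original module) ---
# Typical call, e.g. after reading station data back from storage where every
# column is a string:
#   df = coerce_field_types(df, TimeResolution.HOURLY)
# which restores integer station ids, datetime date columns and numeric
# quality/measurement columns as described in the docstring above.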
from cytopy.data import gate
from cytopy.data.geometry import *
from scipy.spatial.distance import euclidean
from shapely.geometry import Polygon
from sklearn.datasets import make_blobs
from KDEpy import FFTKDE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pytest
np.random.seed(42)
def test_child_init():
test_child = gate.Child(name="test",
signature={"x": 2423, "y": 2232, "z": 4543})
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
def test_childthreshold_init():
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition="+",
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
assert test_child.definition == "+"
assert test_child.geom.x == "x"
assert test_child.geom.y == "y"
assert test_child.geom.x_threshold == 0.5
assert test_child.geom.y_threshold == 0.5
assert test_child.geom.transform_x == "logicle"
assert test_child.geom.transform_x == "logicle"
@pytest.mark.parametrize("definition,expected", [("+", True),
("-", False)])
def test_childthreshold_match_definition_1d(definition, expected):
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition=definition,
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.match_definition("+") == expected
@pytest.mark.parametrize("definition,expected", [("++", True),
("--", False),
("++,+-", True),
("--,-+", False),
("+-,-+,++", True)])
def test_childthreshold_match_definition_2d(definition, expected):
test_child = gate.ChildThreshold(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
definition=definition,
geom=ThresholdGeom(x="x",
y="y",
x_threshold=0.5,
y_threshold=0.5,
transform_x="logicle",
transform_y="logicle"))
assert test_child.match_definition("++") == expected
def test_childpolygon_init():
test_child = gate.ChildPolygon(name="test",
signature={"x": 2423, "y": 2232, "z": 4543},
geom=PolygonGeom(x="x", y="y"))
assert test_child.name == "test"
assert test_child.signature.get("x") == 2423
assert test_child.signature.get("y") == 2232
assert test_child.signature.get("z") == 4543
assert test_child.geom.x == "x"
assert test_child.geom.y == "y"
@pytest.mark.parametrize("klass,method", [(gate.Gate, "manual"),
(gate.ThresholdGate, "density"),
(gate.PolygonGate, "manual"),
(gate.EllipseGate, "GaussianMixture")])
def test_gate_init(klass, method):
g = klass(gate_name="test",
parent="test parent",
x="X",
y="Y",
method=method,
dim_reduction=dict(method="UMAP", kwargs={"n_neighbours": 100}))
assert g.gate_name == "test"
assert g.parent == "test parent"
assert g.x == "X"
assert g.y == "Y"
assert g.method == method
assert g.dim_reduction.get("method") == "UMAP"
assert g.dim_reduction.get("kwargs").get("n_neighbours") == 100
def test_transform_none():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
for i in ["X", "Y"]:
assert transformed[i].mean() == pytest.approx(1., 0.1)
assert transformed[i].std() == pytest.approx(0.5, 0.1)
def test_transform_x():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
assert transformed["X"].mean() != pytest.approx(1., 0.1)
assert transformed["X"].std() != pytest.approx(0.5, 0.1)
assert transformed["Y"].mean() == pytest.approx(1., 0.1)
assert transformed["Y"].std() == pytest.approx(0.5, 0.1)
def test_transform_xy():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle",
transform_y="logicle")
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
transformed = g.transform(data)
assert isinstance(transformed, pd.DataFrame)
assert transformed.shape[0] == 1000
assert transformed.shape[1] == 2
assert transformed["X"].mean() != pytest.approx(1., 0.1)
assert transformed["X"].std() != pytest.approx(0.5, 0.1)
assert transformed["Y"].mean() != pytest.approx(1., 0.1)
assert transformed["Y"].std() != pytest.approx(0.5, 0.1)
@pytest.mark.parametrize("kwargs", [{"method": "uniform",
"n": 500},
{"method": "faithful"},
{"method": "density"}])
def test_downsample(kwargs):
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
sampling=kwargs)
data = pd.DataFrame({"X": np.random.normal(1, scale=0.5, size=1000),
"Y": np.random.normal(1, scale=0.5, size=1000)})
sample = g._downsample(data=data)
if kwargs.get("method") is None:
assert sample is None
else:
assert sample.shape[0] < data.shape[0]
def test_upsample():
data, labels = make_blobs(n_samples=3000,
n_features=2,
centers=3,
random_state=42)
data = pd.DataFrame(data, columns=["X", "Y"])
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
sampling={"method": "uniform",
"frac": 0.5})
sample = g._downsample(data=data)
sample_labels = labels[sample.index.values]
pops = list()
for x in np.unique(sample_labels):
idx = sample.index.values[np.where(sample_labels == x)[0]]
pops.append(gate.Population(population_name=f"Pop_{x}",
parent="root",
index=idx[:498]))
pops = g._upsample(data=data, sample=sample, populations=pops)
assert isinstance(pops, list)
assert all([isinstance(p, gate.Population) for p in pops])
assert all([len(p.index) == 1000 for p in pops])
for x in np.unique(labels):
p = [i for i in pops if i.population_name == f"Pop_{x}"][0]
assert np.array_equal(p.index, np.where(labels == x)[0])
def test_dim_reduction():
g = gate.Gate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
dim_reduction={"method": "UMAP",
"n_neighbors": 100})
data = pd.DataFrame({"X": np.random.normal(1, 0.5, 1000),
"Y": np.random.normal(1, 0.5, 1000),
"Z": np.random.normal(1, 0.5, 1000),
"W": np.random.normal(1, 0.5, 1000)})
data = g._dim_reduction(data=data)
assert g.x == "UMAP1"
assert g.y == "UMAP2"
assert data.shape == (1000, 6)
assert all([f"UMAP{i + 1}" in data.columns for i in range(2)])
@pytest.mark.parametrize("d", ["++", "--", "+-", "+++", "+ -"])
def test_threshold_add_child_invalid_1d(d):
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
method="manual",
x="X")
child = gate.ChildThreshold(name="test child",
definition=d,
geom=ThresholdGeom(x="X", x_threshold=0.56, y_threshold=0.75))
with pytest.raises(AssertionError) as err:
threshold.add_child(child)
assert str(err.value) == "Invalid child definition, should be either '+' or '-'"
@pytest.mark.parametrize("d", ["+", "-", "+--", "+++", "+ -"])
def test_threshold_add_child_invalid_2d(d):
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual")
child = gate.ChildThreshold(name="test child",
definition=d,
geom=ThresholdGeom(x_threshold=0.56, y_threshold=0.75))
with pytest.raises(AssertionError) as err:
threshold.add_child(child)
assert str(err.value) == "Invalid child definition, should be one of: '++', '+-', '-+', or '--'"
def test_threshold_add_child():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="manual",
transform_x="logicle")
child = gate.ChildThreshold(name="test child",
definition="++",
geom=ThresholdGeom(x_threshold=0.56, y_threshold=0.75))
threshold.add_child(child)
assert len(threshold.children)
assert threshold.children[0].geom.x == threshold.x
assert threshold.children[0].geom.y == threshold.y
assert threshold.children[0].geom.transform_x == "logicle"
assert not threshold.children[0].geom.transform_y
def test_threshold_match_children_1d():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
data = np.random.normal(loc=1., scale=1.5, size=1000)
threshold.add_child(gate.ChildThreshold(name="positive",
definition="+",
geom=ThresholdGeom(x_threshold=0.5)))
threshold.add_child(gate.ChildThreshold(name="negative",
definition="-",
geom=ThresholdGeom(x_threshold=0.5)))
pos = gate.Population(population_name="p1",
parent="root",
definition="+",
geom=ThresholdGeom(x_threshold=0.6),
index=data[np.where(data >= 0.6)])
neg = gate.Population(population_name="p2",
parent="root",
definition="-",
geom=ThresholdGeom(x_threshold=0.6),
index=data[np.where(data >= 0.6)])
pops = threshold._match_to_children([neg, pos])
pos = [p for p in pops if p.definition == "+"][0]
assert pos.population_name == "positive"
neg = [p for p in pops if p.definition == "-"][0]
assert neg.population_name == "negative"
def test_threshold_match_children_2d():
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
x = np.random.normal(loc=1., scale=1.5, size=1000)
y = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x, "Y": y})
threshold.add_child(gate.ChildThreshold(name="positive",
definition="++,+-",
geom=ThresholdGeom(x_threshold=0.5)))
threshold.add_child(gate.ChildThreshold(name="negative",
definition="--,-+",
geom=ThresholdGeom(x_threshold=0.5)))
pos = gate.Population(population_name="p1",
parent="root",
definition="++",
geom=ThresholdGeom(x_threshold=0.6),
index=data[data.X >= 0.6].index.values)
neg = gate.Population(population_name="p2",
parent="root",
definition="--,-+",
geom=ThresholdGeom(x_threshold=0.6),
index=data[data.X < 0.6].index.values)
pops = threshold._match_to_children([neg, pos])
pos = [p for p in pops if p.definition == "++"][0]
assert pos.population_name == "positive"
neg = [p for p in pops if p.definition == "--,-+"][0]
assert neg.population_name == "negative"
def test_threshold_1d():
x = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x})
results = gate.threshold_1d(data=data, x="X", x_threshold=0.5)
assert len(results.keys()) == 2
assert all(isinstance(df, pd.DataFrame) for df in results.values())
assert len(np.where(x >= 0.5)[0]) == results.get("+").shape[0]
assert len(np.where(x < 0.5)[0]) == results.get("-").shape[0]
def test_threshold_2d():
x = np.random.normal(loc=1., scale=1.5, size=1000)
y = np.random.normal(loc=1., scale=1.5, size=1000)
data = pd.DataFrame({"X": x,
"Y": y})
results = gate.threshold_2d(data=data, x="X", y="Y", x_threshold=0.5, y_threshold=0.5)
assert len(results.keys()) == 4
assert all(isinstance(df, pd.DataFrame) for df in results.values())
x_pos, y_pos = np.where(x >= 0.5)[0], np.where(y >= 0.5)[0]
x_neg, y_neg = np.where(x < 0.5)[0], np.where(y < 0.5)[0]
assert len(np.intersect1d(x_pos, y_pos)) == results.get("++").shape[0]
assert len(np.intersect1d(x_pos, y_neg)) == results.get("+-").shape[0]
assert len(np.intersect1d(x_neg, y_pos)) == results.get("-+").shape[0]
assert len(np.intersect1d(x_neg, y_neg)) == results.get("--").shape[0]
def test_smoothed_peak_finding():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = np.hstack([n1, n2, n3])
smoothed, peaks = gate.smoothed_peak_finding(p=data)
assert isinstance(smoothed, np.ndarray)
assert isinstance(peaks, np.ndarray)
assert len(peaks) == 2
def test_find_local_minima():
n1 = np.random.normal(loc=2, scale=1, size=1000)
n2 = np.random.normal(loc=10, scale=0.5, size=1000)
data = np.hstack([n1, n2])
x, y = FFTKDE(kernel='gaussian', bw='silverman').fit(data).evaluate()
peak1 = np.where(y == np.max(y[np.where(x < 6)]))[0][0]
peak2 = np.where(y == np.max(y[np.where(x > 6)]))[0][0]
minima = x[np.where(y == np.min(y[np.where((x > 4) & (x < 7))]))[0][0]]
assert gate.find_local_minima(p=y, x=x, peaks=np.array([peak1, peak2])) == minima
def test_find_inflection_point():
np.random.seed(42)
n1 = np.random.normal(loc=2, scale=1, size=1000)
x, y = FFTKDE(kernel='gaussian', bw='silverman').fit(n1).evaluate()
inflection_point = gate.find_inflection_point(x=x, p=y, peak_idx=int(np.argmax(y)),
incline=False)
plt.plot(x, y)
plt.axvline(inflection_point, c="r")
plt.title("Test inflection point; incline=False")
plt.show()
assert 3 < inflection_point < 4
inflection_point = gate.find_inflection_point(x=x, p=y, peak_idx=int(np.argmax(y)),
incline=True)
plt.plot(x, y)
plt.axvline(inflection_point, c="r")
plt.title("Test inflection point; incline=True")
plt.show()
assert 0 < inflection_point < 1
def test_threshold_fit_1d():
np.random.seed(42)
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
assert len(threshold.children) == 2
assert threshold.children[0].geom.x_threshold == threshold.children[1].geom.x_threshold
assert round(threshold.children[0].geom.x_threshold) == 4
assert all([i in [c.definition for c in threshold.children] for i in ["+", "-"]])
def test_threshold_fit_2d():
data, labels = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 5.), (5., 0.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data)
assert len(threshold.children) == 4
assert len(set([c.geom.x_threshold for c in threshold.children])) == 1
assert len(set([c.geom.y_threshold for c in threshold.children])) == 1
assert all([i in [c.definition for c in threshold.children] for i in ["++", "--",
"+-", "-+"]])
assert 2 < threshold.children[0].geom.x_threshold < 4
assert 2 < threshold.children[0].geom.y_threshold < 4
def test_threshold_predict_1d():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
new_data = pd.DataFrame({"X": np.hstack([np.random.normal(loc=0.2, scale=1, size=500),
np.random.normal(loc=6.5, scale=0.5, size=500)])})
pops = threshold.predict(new_data)
assert len(pops) == 2
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["+", "-"])
neg_idx = new_data[new_data.X < threshold.children[0].geom.x_threshold].index.values
pos_idx = new_data[new_data.X >= threshold.children[0].geom.x_threshold].index.values
pos_pop = [p for p in pops if p.definition == "+"][0]
neg_pop = [p for p in pops if p.definition == "-"][0]
assert np.array_equal(neg_pop.index, neg_idx)
assert np.array_equal(pos_pop.index, pos_idx)
def test_threshold_predict_2d():
data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 5.), (5., 0.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data=data)
new_data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (5., 0.2)],
random_state=42)
new_data = pd.DataFrame({"X": new_data[:, 0], "Y": new_data[:, 1]})
pops = threshold.predict(new_data)
assert len(pops) == 4
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["++", "--", "-+", "+-"])
neg_idx = new_data[(new_data.X < threshold.children[0].geom.x_threshold) &
(new_data.Y < threshold.children[0].geom.y_threshold)].index.values
pos_idx = new_data[(new_data.X >= threshold.children[0].geom.x_threshold) &
(new_data.Y >= threshold.children[0].geom.y_threshold)].index.values
negpos_idx = new_data[(new_data.X < threshold.children[0].geom.x_threshold) &
(new_data.Y >= threshold.children[0].geom.y_threshold)].index.values
posneg_idx = new_data[(new_data.X >= threshold.children[0].geom.x_threshold) &
(new_data.Y < threshold.children[0].geom.y_threshold)].index.values
pos_pop = [p for p in pops if p.definition == "++"][0]
neg_pop = [p for p in pops if p.definition == "--"][0]
posneg_pop = [p for p in pops if p.definition == "+-"][0]
negpos_pop = [p for p in pops if p.definition == "-+"][0]
assert np.array_equal(neg_pop.index, neg_idx)
assert np.array_equal(pos_pop.index, pos_idx)
assert np.array_equal(negpos_pop.index, negpos_idx)
assert np.array_equal(posneg_pop.index, posneg_idx)
def test_threshold_fit_predict_1d():
n1 = np.random.normal(loc=0.2, scale=1, size=500)
n2 = np.random.normal(loc=2.5, scale=0.2, size=250)
n3 = np.random.normal(loc=6.5, scale=0.5, size=500)
data = pd.DataFrame({"X": np.hstack([n1, n2, n3])})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
method="density")
threshold.fit(data=data)
threshold.label_children({"+": "Positive",
"-": "Negative"})
new_data = pd.DataFrame({"X": np.hstack([np.random.normal(loc=0.2, scale=1, size=200),
np.random.normal(loc=6.5, scale=0.5, size=1000)])})
pops = threshold.fit_predict(new_data)
assert len(pops) == 2
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
assert all(i in [p.definition for p in pops] for i in ["+", "-"])
pos_pop = [p for p in pops if p.definition == "+"][0]
assert pos_pop.population_name == "Positive"
neg_pop = [p for p in pops if p.definition == "-"][0]
assert neg_pop.population_name == "Negative"
assert len(pos_pop.index) > len(neg_pop.index)
assert len(pos_pop.index) > 800
assert len(neg_pop.index) < 300
def test_threshold_fit_predict_2d():
data, _ = make_blobs(n_samples=4000,
n_features=2,
centers=[(1., 1.), (1., 7.), (7., 2.), (7., 6.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
threshold = gate.ThresholdGate(gate_name="test",
parent="test parent",
x="X",
y="Y",
method="density")
threshold.fit(data)
threshold.label_children({"++": "Top left",
"--": "Other",
"-+": "Other",
"+-": "Other"})
data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 7.), (7., 6.2)],
random_state=42)
data = pd.DataFrame({"X": data[:, 0], "Y": data[:, 1]})
pops = threshold.fit_predict(data=data)
assert len(pops) == 2
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, ThresholdGeom) for p in pops])
assert all([p.geom.x == threshold.x for p in pops])
assert all([p.geom.y == threshold.y for p in pops])
assert all(p.geom.transform_x == threshold.transform_x for p in pops)
assert all(p.geom.transform_y == threshold.transform_y for p in pops)
top_left = [p for p in pops if p.population_name == "Top left"][0]
other = [p for p in pops if p.population_name == "Other"][0]
assert top_left.definition == "++"
assert {"+-", "-+", "--"} == set(other.definition.split(","))
assert len(top_left.index) < len(other.index)
assert len(top_left.index) > 900
assert len(other.index) > 1900
def create_polygon_gate(klass,
method: str,
**kwargs):
g = klass(gate_name="test",
parent="test parent",
x="X",
y="Y",
method=method,
method_kwargs={k: v for k, v in kwargs.items()})
return g
def test_polygon_add_child():
g = create_polygon_gate(klass=gate.PolygonGate, method="MiniBatchKMeans")
data, _ = make_blobs(n_samples=3000,
n_features=2,
centers=[(1., 1.), (1., 7.), (7., 6.2)],
random_state=42)
g.add_child(gate.ChildPolygon(name="test",
geom=PolygonGeom(x_values=np.linspace(0, 1000, 1).tolist(),
y_values=np.linspace(0, 1000, 1).tolist())))
assert len(g.children) == 1
assert g.children[0].name == "test"
assert g.children[0].geom.x == g.x
assert g.children[0].geom.y == g.y
assert g.children[0].geom.transform_x == g.transform_x
assert g.children[0].geom.transform_y == g.transform_y
def test_polygon_generate_populations():
data, labels = make_blobs(n_samples=4000,
n_features=2,
cluster_std=0.5,
centers=[(1., 1.), (1., 7.), (7., 6.2), (6., 1.)],
random_state=42)
data = pd.DataFrame(data, columns=["X", "Y"])
g = create_polygon_gate(klass=gate.PolygonGate, method="MiniBatchKMeans")
polys = [Polygon([(-1., -1), (-1, 10), (3, 10), (3, -1), (-1, -1)]),
Polygon([(4, -1), (8, -1), (8, 3.8), (4, 3.8), (4, -1)]),
Polygon([(4, 4), (4, 10), (10, 10), (10, 4), (4, 4)])]
pops = g._generate_populations(data=data,
polygons=polys)
assert len(pops) == 3
assert all([isinstance(p, gate.Population) for p in pops])
assert all([isinstance(p.geom, PolygonGeom) for p in pops])
for p in pops:
assert p.geom.x == g.x
assert p.geom.y == g.y
assert p.geom.transform_x == g.transform_x
assert p.geom.transform_y == g.transform_y
assert p.parent == "test parent"
for name, n in zip(["A", "B", "C"], [2000, 1000, 1000]):
p = [p for p in pops if p.population_name == name][0]
assert len(p.index) == n
assert len(p.geom.x_values) == 5
assert len(p.geom.y_values) == 5
def test_polygon_match_to_children():
data, labels = make_blobs(n_samples=5000,
n_features=2,
cluster_std=1,
centers=[(1., 1.), (10., 6.2), (1.5, 2.), (11, 7.), (11.5, 7.5)],
random_state=42)
data_dict = [{"data": data[np.where(labels == i)],
"signature": pd.DataFrame(data[np.where(labels == i)], columns=["X", "Y"]).mean().to_dict(),
"poly": create_polygon(
*create_convex_hull(data[np.where(labels == i)][:, 0], data[np.where(labels == i)][:, 1]))}
for i in range(5)]
g = create_polygon_gate(klass=gate.PolygonGate, method="MiniBatchKMeans")
for i in [0, 1]:
g.add_child(gate.ChildPolygon(name=f"Child{i + 1}",
signature=data_dict[i].get("signature"),
geom=PolygonGeom(x_values=data_dict[i].get("poly").exterior.xy[0],
y_values=data_dict[i].get("poly").exterior.xy[1])))
    pops = g._generate_populations(data=pd.DataFrame(data, columns=["X", "Y"]))
# %%
import os
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.decomposition import PCA
base_dir = os.getcwd()
# %%
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_a_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_a_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
test_b_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_op.csv')
test_b_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_trans.csv')
# %%
# op_type onehot+pca
op_type = pd.concat([train_op_df['op_type'], test_a_op_df['op_type'], test_b_op_df['op_type']])
dim_op_type = 10
values_op_type_org = op_type.unique().tolist()  # values in their original shape
values_op_type = np.array(values_op_type_org).reshape(len(values_op_type_org), -1)
enc_op_type = OneHotEncoder()
enc_op_type.fit(values_op_type)
onehot_op_type = enc_op_type.transform(values_op_type).toarray()
pca_op_type = PCA(n_components=dim_op_type)
pca_op_type.fit(onehot_op_type)
result_op_type = pca_op_type.transform(onehot_op_type)
mp_op_type = dict(zip(values_op_type_org, [code for code in result_op_type]))
pd.DataFrame.from_dict(data=mp_op_type, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_op_type.csv', index=False)
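# --- Added refactoring sketch (editor's example, not part of the original script) ---
# The one-hot + PCA encoding above is repeated for several columns below; the same
# pattern can be expressed once as a helper (name and signature are illustrative):
def _encode_onehot_pca(series: pd.Series, n_components: int) -> dict:
    """Map each unique value of `series` to a PCA-compressed one-hot code."""
    values_org = series.unique().tolist()
    values = np.array(values_org).reshape(len(values_org), -1)
    onehot = OneHotEncoder().fit_transform(values).toarray()
    codes = PCA(n_components=n_components).fit_transform(onehot)
    return dict(zip(values_org, list(codes)))
# e.g. mp_op_type above could equivalently be built as:
#   mp_op_type = _encode_onehot_pca(op_type, dim_op_type)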
# %%
# op_mode onehot+pca
op_mode = pd.concat([train_op_df['op_mode'], test_a_op_df['op_mode'], test_b_op_df['op_mode']])
dim_op_mode = 10
values_op_mode_org = op_mode.unique().tolist()  # values in their original shape
values_op_mode = np.array(values_op_mode_org).reshape(len(values_op_mode_org), -1)
enc_op_mode = OneHotEncoder()
enc_op_mode.fit(values_op_mode)
onehot_op_mode = enc_op_mode.transform(values_op_mode).toarray()
pca_op_mode = PCA(n_components=dim_op_mode)
pca_op_mode.fit(onehot_op_mode)
result_op_mode = pca_op_mode.transform(onehot_op_mode)
mp_op_mode = dict(zip(values_op_mode_org, [code for code in result_op_mode]))
pd.DataFrame.from_dict(data=mp_op_mode, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_op_mode.csv', index=False)
# %%
# net_type onehot
net_type = pd.concat([train_op_df['net_type'], test_a_op_df['net_type'], test_b_op_df['net_type']])
values_net_type_org = net_type.unique().tolist()
values_net_type = np.array(values_net_type_org).reshape(len(values_net_type_org), -1)
enc_net_type = OneHotEncoder()
enc_net_type.fit(values_net_type)
onehot_net_type = enc_net_type.transform(values_net_type).toarray()
mp_op_net_type = dict(zip(values_net_type_org, [code for code in onehot_net_type]))
pd.DataFrame.from_dict(data=mp_op_net_type, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_op_net_type.csv', index=False)
# %%
# op_channel onehot+pca
channel = pd.concat([train_op_df['channel'], test_a_op_df['channel'], test_b_op_df['channel']])
dim_channel = 5
values_channel_org = channel.unique().tolist()  # values in their original shape
values_channel = np.array(values_channel_org).reshape(len(values_channel_org), -1)
enc_channel = OneHotEncoder()
enc_channel.fit(values_channel)
onehot_channel = enc_channel.transform(values_channel).toarray()
pca_channel = PCA(n_components=dim_channel)
pca_channel.fit(onehot_channel)
result_channel = pca_channel.transform(onehot_channel)
mp_op_channel = dict(zip(values_channel_org, [code for code in result_channel]))
pd.DataFrame.from_dict(data=mp_op_channel, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_op_channel.csv', index=False)
# %%
# tran_platform onehot
platform = pd.concat([train_trans_df['platform'], test_a_trans_df['platform'], test_b_trans_df['platform']])
values_platform_org = platform.unique().tolist()
values_platform = np.array(values_platform_org).reshape(len(values_platform_org), -1)
enc_platform = OneHotEncoder()
enc_platform.fit(values_platform)
onehot_platform = enc_platform.transform(values_platform).toarray()
mp_trans_platform = dict(zip(values_platform_org, [code for code in onehot_platform]))
pd.DataFrame.from_dict(data=mp_trans_platform, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_platform.csv', index=False)
# %%
# tunnel_in onehot
tunnel_in = pd.concat([train_trans_df['tunnel_in'], test_a_trans_df['tunnel_in'], test_b_trans_df['tunnel_in']])
values_tunnel_in_org = tunnel_in.unique().tolist()
values_tunnel_in = np.array(values_tunnel_in_org).reshape(len(values_tunnel_in_org), -1)
enc_tunnel_in = OneHotEncoder()
enc_tunnel_in.fit(values_tunnel_in)
onehot_tunnel_in = enc_tunnel_in.transform(values_tunnel_in).toarray()
mp_trans_tunnel_in = dict(zip(values_tunnel_in_org, [code for code in onehot_tunnel_in]))
pd.DataFrame.from_dict(data=mp_trans_tunnel_in, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_in.csv', index=False)
# %%
# tunnel_out onehot
tunnel_out = pd.concat([train_trans_df['tunnel_out'], test_a_trans_df['tunnel_out'], test_b_trans_df['tunnel_out']])
values_tunnel_out_org = tunnel_out.unique().tolist()
values_tunnel_out = np.array(values_tunnel_out_org).reshape(len(values_tunnel_out_org), -1)
enc_tunnel_out = OneHotEncoder()
enc_tunnel_out.fit(values_tunnel_out)
onehot_tunnel_out = enc_tunnel_out.transform(values_tunnel_out).toarray()
mp_trans_tunnel_out = dict(zip(values_tunnel_out_org, [code for code in onehot_tunnel_out]))
pd.DataFrame.from_dict(data=mp_trans_tunnel_out, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_out.csv', index=False)
# %%
# trans_type1 onehot+pca
type1 = pd.concat([train_trans_df['type1'], test_a_trans_df['type1'], test_b_trans_df['type1']])
dim_type1 = 5
values_type1_org = type1.unique().tolist()  # values in their original shape
values_type1 = np.array(values_type1_org).reshape(len(values_type1_org), -1)
enc_type1 = OneHotEncoder()
enc_type1.fit(values_type1)
onehot_type1 = enc_type1.transform(values_type1).toarray()
pca_type1 = PCA(n_components=dim_type1)
pca_type1.fit(onehot_type1)
result_type1 = pca_type1.transform(onehot_type1)
mp_trans_type1 = dict(zip(values_type1_org, [code for code in result_type1]))
pd.DataFrame.from_dict(data=mp_trans_type1, orient='columns')\
.to_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type1.csv', index=False)
# %%
type2 = pd.concat([train_trans_df['type2'], test_a_trans_df['type2'], test_b_trans_df['type2']])
dim_type2 = 5
values_type2_org = type2.unique().tolist()  # values in their original shape
values_type2 = np.array(values_type2_org).reshape(len(values_type2_org), -1)
enc_type2 = OneHotEncoder()
enc_type2.fit(values_type2)
onehot_type2 = enc_type2.transform(values_type2).toarray()
pca_type2 = PCA(n_components=dim_type2)
pca_type2.fit(onehot_type2)
result_type2 = pca_type2.transform(onehot_type2)
mp_trans_type2 = dict(zip(values_type2_org, [code for code in result_type2]))
pd.DataFrame.from_dict(data=mp_trans_type2, orient='columns')\
    .to_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type2.csv', index=False)
import numpy as np
import pandas as pd
import genetic_algorithm_feature_selection.variable_selection as vs
import genetic_algorithm_feature_selection.genetic_steps as gs
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
# import matplotlib.pyplot as plt
nCols = 50
nGoods = 10
data, target, coefs = make_regression(n_samples=1000,
n_features=nCols,
n_informative=nGoods,
noise=1,
effective_rank=10,
coef=True,
random_state=243)
colnames = np.array([f'X{n}' for n in range(nCols)])
data = pd.DataFrame(data, columns=colnames)
target = pd.Series(target, name='target')
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
import datetime
import os
today=datetime.date.today()
camera=[]
model1=[]
urls=[]
urls1=[]
urls2=[]
model=[]
company=[]
specs=[]
country=[]
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
class Client(QWebPage):
def __init__(self, url):
self.app = QApplication(sys.argv)
QWebPage.__init__(self)
self.loadFinished.connect(self.on_page_load)
self.mainFrame().load(QUrl(url))
self.app.exec_()
def on_page_load(self):
self.app.quit()
url = 'https://www.meizu.com/in/'
client_response = Client(url)
source = client_response.mainFrame().toHtml()
soup = bs.BeautifulSoup(source, 'lxml')
link= soup.find('div', class_='meizu-header-sub-wrap').find_all('a')
for l in link:
urls.append(l['href'])
model.append(l.text.strip('\n\t'))
#print(model)
for u in urls:
#print(u)
r=requests.get(u)
soup=BeautifulSoup(r.text,'html.parser')
links=soup.find_all('li',attrs={'class':'subnav-item-spec'})
for i in links:
tt=i.find_all('a')
for u in tt:
urls1.append('https://www.meizu.com'+u['href'])
#print(urls1)
for p in urls1:
d=[]
r=requests.get(p)
soup=BeautifulSoup(r.text,'html.parser')
links=soup.find_all('div',attrs={'class':'desc-font-style'})
spe=soup.find_all('div',attrs={'class':'banner-left-top clearfix'})
if links:
print(p)
country.append("CHINA")
company.append("MEIZU")
#print("inside if")
extras_links.append(p)
#specs.append("NA")
for i in links:
tt=i.find_all('p')
for u in tt:
d.append(u.text.strip('\n').replace('\n',' '))
kt=' '
pt=' '
for x in d:
if "mAH" in x or "mAh" in x:
battery_list.append(x)
if "inch" in x:
display_list.append(x)
if "processor" in x or "Processor" in x or 'PROCESSOR' in x or 'MT' in x:
processor_list.append(x)
#print("________")
if "megapixels" in x or "Megapixels" in x or "megapixel" in x :
kt=kt+x+" "
if "Thickness " in x or 'thickness ' in x or "THICKNESS" in x:
thickness_list.append(x)
if "GB" in x:
pt=pt+x+" "
camera_list.append('rear/front: '+kt)
#print("______")
memory_list.append(pt)
if spe:
l=" "
for z in spe:
tt=z.find_all('div',attrs={'class':'t2'})
for n in tt:
l=l+n.text.replace('<br>'," ").replace('\n',' ').strip()+'||'
print(l)
specs.append(l)
for i in model:
if i=='PRO 7':
pass
else:
model1.append(i)
for i in camera_list:
s=" "
match=re.findall(r'\d+-megapixel',i)
if not match:
match=re.findall(r'\d+\s+megapixels',i)
if not match:
match=re.findall(r'\d+\s+megapixel',i)
if not match:
camera.append("NOT AVAILABLE")
for k in match:
s=s+k+" "
#print(s)
camera.append("REAR/FRONT: "+ s)
print(len(country))
print(len(company))
print(len(model1))
print(len(specs))
print(len(display_list))
print(len(camera))
print(len(memory_list))
print(len(battery_list))
print(len(thickness_list))
print(len(processor_list))
print(len(extras_links))
#print(camera_list)
records=[]
for i in range(len(country)):
records.append((country[i], company[i], model1[i], specs[i], display_list[i], camera[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
path='C:\\LavaWebScraper\\BrandWiseFiles\\'
df = pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS'])
# -*- coding: utf-8 -*-
import os
import sys
from typing import List, NamedTuple
from datetime import datetime
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import gapic as aip
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import component, pipeline, Input, Output, Model, Metrics, Dataset, HTML
USERNAME = "<lowercase user name>" # @param username
BUCKET_NAME = "gs://<USED BUCKET>" # @param bucket name
REGION = "<REGION>" # @param region
PROJECT_ID = "<GCP PROJECT ID>" # @param project id
PROJECT_NUMBER = "<GCP PROJECT NUMBER>" # @param project number
PIPELINE_NAME = f"diamonds-predictor-serving-pipeline-{USERNAME}"
ARTIFACT_REGISTRY_NAME = "diamonds-predictor-repo"
SUPERWISE_CLIENT_ID = "<YOUR SUPERWISE ACCOUNT CLIENT ID>" # @param project number
SUPERWISE_SECRET = "<YOUR SUPERWISE ACCOUNT SECRET>" # @param project number
SUPERWISE_MODEL_NAME = "Regression - Diamonds Price Predictor"
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
""" Vertex definitions """
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
PIPELINE_ROOT = "{}/{}_pipeline_root/workshop".format(BUCKET_NAME, USERNAME)
# Load the data Component
@component(packages_to_install=["pandas"])
def load_data(dataset: Output[Dataset]):
import pandas as pd
df = pd.read_csv("https://www.openml.org/data/get_csv/21792853/dataset")
df = df[df["price"] < 10000]
print("Load Data: ", df.head())
df.to_csv(dataset.path, index=False)
# Validate the data Component
@component(packages_to_install=["pandas"])
def validate_data(df: Input[Dataset], validated_df: Output[Dataset]):
import pandas as pd
    df = pd.read_csv(df.path)
import pytest
import unittest
from unittest import mock
from ops.tasks.anomalyDetection import anomalyService
from anomaly.models import Anomaly
from pandas import Timestamp
from decimal import Decimal
from mixer.backend.django import mixer
import pandas as pd
@pytest.mark.django_db(transaction=True)
def test_createAnomalyService(client, mocker):
fakedata = [{'ds': Timestamp('2021-06-01 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-02 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-03 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-04 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-05 00:00:00+0000', tz='UTC'),
'y': Decimal('4.000000000')},
{'ds': Timestamp('2021-06-06 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-07 00:00:00+0000', tz='UTC'),
'y': Decimal('4.000000000')},
{'ds': Timestamp('2021-06-08 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-09 00:00:00+0000', tz='UTC'),
'y': Decimal('2.000000000')},
{'ds': Timestamp('2021-06-10 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-11 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-12 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-13 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-14 00:00:00+0000', tz='UTC'),
'y': Decimal('2.000000000')},
{'ds': Timestamp('2021-06-15 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-16 00:00:00+0000', tz='UTC'),
'y': Decimal('2.000000000')},
{'ds': Timestamp('2021-06-17 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-18 00:00:00+0000', tz='UTC'),
'y': Decimal('2.000000000')},
{'ds': Timestamp('2021-06-19 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-20 00:00:00+0000', tz='UTC'),
'y': Decimal('2.000000000')},
{'ds': Timestamp('2021-06-21 00:00:00+0000', tz='UTC'),
'y': Decimal('4.000000000')},
{'ds': Timestamp('2021-06-22 00:00:00+0000', tz='UTC'),
'y': Decimal('3.000000000')},
{'ds': Timestamp('2021-06-23 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-24 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-25 00:00:00+0000', tz='UTC'),
'y': Decimal('3.000000000')},
{'ds': Timestamp('2021-06-26 00:00:00+0000', tz='UTC'),
'y': Decimal('3.000000000')},
{'ds': Timestamp('2021-06-27 00:00:00+0000', tz='UTC'),
'y': Decimal('1.000000000')},
{'ds': Timestamp('2021-06-28 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-29 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')},
{'ds': Timestamp('2021-06-30 00:00:00+0000', tz='UTC'), 'y': Decimal('0E-9')}]
    df = pd.DataFrame(fakedata)
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc. Monkey-patch columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 3: the default line terminator (= os.linesep) (gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# Writing a categorical Series/DataFrame to CSV should produce the same
# output as writing the equivalent plain (object-dtype) Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize(
"df,encoding",
[
(
DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
),
None,
),
# GH 21241, 21118
(DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
(DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
(
DataFrame(5 * [[123, "Γειά σου", "Κόσμε"]], columns=["X", "Y", "Z"]),
"cp737",
),
],
)
def test_to_csv_compression(self, df, encoding, compression):
with tm.ensure_clean() as filename:
df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
result = read_csv(
filename, compression=compression, index_col=0, encoding=encoding
)
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
df.to_csv(handles.handle, encoding=encoding)
assert not handles.handle.closed
result = read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_frame_equal(df, result)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
def test_to_csv_date_format(self, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_date_format__") as path:
dt_index = datetime_frame.index
datetime_frame = DataFrame(
{"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
)
datetime_frame.to_csv(path, date_format="%Y%m%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime("%Y%m%d"))
)
tm.assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format="%Y-%m-%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime("%Y-%m-%d")
)
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime("%Y-%m-%d")
)
tm.assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format="%Y%m%d")
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(
lambda x: x.strftime("%Y%m%d")
)
tm.assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
["NaT"] * 10 + ["2000-01-01", "1/1/2000", "1-1-2000"]
)
nat_frame = DataFrame({"A": nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format="%Y-%m-%d")
test = read_csv(path, parse_dates=[0, 1], index_col=0)
tm.assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with tm.ensure_clean("csv_date_format_with_dst") as path:
# make sure we are not failing on transitions
times = date_range(
"2013-10-26 23:00",
"2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous="infer",
)
for i in [times, times + pd.Timedelta("10s")]:
i = i._with_freq(None) # freq is not preserved by read_csv
time_range = np.array(range(len(i)), dtype="int64")
df = DataFrame({"A": time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/London"
)
tm.assert_frame_equal(result, df)
# GH11619
idx = date_range("2015-01-01", "2015-12-31", freq="H", tz="Europe/Paris")
idx = idx._with_freq(None) # freq does not round-trip
idx._data._freq = None # otherwise there is trouble on unpickle
df = DataFrame({"values": 1, "idx": idx}, index=idx)
with tm.ensure_clean("csv_date_format_with_dst") as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index, utc=True).tz_convert(
"Europe/Paris"
)
result["idx"] = to_datetime(result["idx"], utc=True).astype(
"datetime64[ns, Europe/Paris]"
)
tm.assert_frame_equal(result, df)
# check that casting the frame to string works (should not raise)
df.astype(str)
with | tm.ensure_clean("csv_date_format_with_dst") | pandas._testing.ensure_clean |
import os
from dataclasses import dataclass
from typing import Callable, List, Dict
from typing import Optional
import pandas as pd
from PIL.Image import Image as Img
from dacite import from_dict
from wheel5.dataset import LMDBImageDataset, SimpleImageClassificationDataset
from wheel5.dataset import SimpleImageDetectionDataset
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
@dataclass
class ClassifierDatasetConfig:
image_dir: str
metadata: Optional[str]
annotations: Optional[str]
lmdb_dir: Optional[str]
@classmethod
def from_dict(cls, d: Dict[str, str]):
return from_dict(cls, d)
@dataclass
class DetectorDatasetConfig:
image_dir: str
metadata: Optional[str]
@classmethod
def from_dict(cls, d: Dict[str, str]):
return from_dict(cls, d)
def load_classes(path: str) -> List[str]:
with open(path, 'r') as f:
lines = [line.strip() for line in f.readlines()]
return [line for line in lines if line != '']
def reverse_classes(classes: List[str]) -> Dict[str, int]:
classes_to_num = {v: k for k, v in enumerate(classes)}
assert len(classes) == len(classes_to_num)
return classes_to_num
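# Illustrative usage of the two helpers above (assumed label file layout: one class
# name per line, blank lines ignored):
#   classes = load_classes('classes.txt')   # e.g. ['cat', 'dog', 'horse']
#   lookup = reverse_classes(classes)        # {'cat': 0, 'dog': 1, 'horse': 2}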
def coco_categories_unique() -> List[str]:
na_counter = 0
categories = []
for category in COCO_INSTANCE_CATEGORY_NAMES:
if category == 'N/A':
category = f'{category}_{na_counter}'
na_counter += 1
categories.append(category)
return categories
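# coco_categories_unique() only disambiguates the repeated 'N/A' placeholders
# ('N/A' -> 'N/A_0', 'N/A_1', ...) so the list can be used where unique labels are
# required (e.g. as mapping keys); real category names pass through unchanged.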
def load_classifier_dataset(config: ClassifierDatasetConfig,
target_classes: List[str],
transform: Callable[[Img], Img] = None,
name: str = ''):
if config.metadata is None:
entries = []
for filename in os.listdir(config.image_dir):
entries.append({'path': filename, 'target': -1})
df_metadata = | pd.DataFrame(entries) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def df_checks():
"""fixture dataframe"""
return pd.DataFrame(
{
"famid": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"birth": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
"ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9],
}
)
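# Wide-format fixture: ht1/ht2 hold repeated measurements per (famid, birth) pair;
# the tests below reshape (or refuse to reshape) these columns into long format.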
@pytest.fixture
def df_multi():
"""MultiIndex dataframe fixture."""
return pd.DataFrame(
{
("name", "a"): {0: "Wilbur", 1: "Petunia", 2: "Gregory"},
("names", "aa"): {0: 67, 1: 80, 2: 64},
("more_names", "aaa"): {0: 56, 1: 90, 2: 50},
}
)
def test_column_level_wrong_type(df_multi):
"""Raise TypeError if wrong type is provided for column_level."""
with pytest.raises(TypeError):
df_multi.pivot_longer(index="name", column_level={0})
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_index(df_checks):
"""Raise TypeError if wrong type is provided for the index."""
with pytest.raises(TypeError):
df_checks.pivot_longer(index=2007)
@pytest.mark.xfail(reason="checking is done within _select_columns")
def test_type_column_names(df_checks):
"""Raise TypeError if wrong type is provided for column_names."""
with pytest.raises(TypeError):
df_checks.pivot_longer(column_names=2007)
def test_type_names_to(df_checks):
"""Raise TypeError if wrong type is provided for names_to."""
with pytest.raises(TypeError):
df_checks.pivot_longer(names_to={2007})
def test_subtype_names_to(df_checks):
"""
Raise TypeError if names_to is a sequence
and the wrong type is provided for entries
in names_to.
"""
with pytest.raises(TypeError, match="1 in names_to.+"):
df_checks.pivot_longer(names_to=[1])
def test_duplicate_names_to(df_checks):
"""Raise error if names_to contains duplicates."""
with pytest.raises(ValueError, match="y is duplicated in names_to."):
df_checks.pivot_longer(names_to=["y", "y"], names_pattern="(.+)(.)")
def test_both_names_sep_and_pattern(df_checks):
"""
Raise ValueError if both names_sep
and names_pattern are provided.
"""
with pytest.raises(
ValueError,
match="Only one of names_pattern or names_sep should be provided.",
):
df_checks.pivot_longer(
names_to=["rar", "bar"], names_sep="-", names_pattern="(.+)(.)"
)
def test_name_pattern_wrong_type(df_checks):
"""Raise TypeError if the wrong type provided for names_pattern."""
with pytest.raises(TypeError, match="names_pattern should be one of.+"):
df_checks.pivot_longer(names_to=["rar", "bar"], names_pattern=2007)
def test_name_pattern_no_names_to(df_checks):
"""Raise ValueError if names_pattern and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_pattern="(.+)(.)")
def test_name_pattern_groups_len(df_checks):
"""
Raise ValueError if names_pattern
and the number of groups
differs from the length of names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of groups in names_pattern.+",
):
df_checks.pivot_longer(names_to=".value", names_pattern="(.+)(.)")
def test_names_pattern_wrong_subtype(df_checks):
"""
Raise TypeError if names_pattern is a list/tuple
and wrong subtype is supplied.
"""
with pytest.raises(TypeError, match="1 in names_pattern.+"):
df_checks.pivot_longer(
names_to=["ht", "num"], names_pattern=[1, "\\d"]
)
def test_names_pattern_names_to_unequal_length(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple
and wrong number of items in names_to.
"""
with pytest.raises(
ValueError,
match="The length of names_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
names_to=["variable"], names_pattern=["^ht", ".+i.+"]
)
def test_names_pattern_names_to_dot_value(df_checks):
"""
Raise ValueError if names_pattern is a list/tuple and
.value is in names_to.
"""
with pytest.raises(
ValueError,
match=".value is not accepted in names_to "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(
names_to=["variable", ".value"], names_pattern=["^ht", ".+i.+"]
)
def test_name_sep_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for names_sep."""
with pytest.raises(TypeError, match="names_sep should be one of.+"):
df_checks.pivot_longer(names_to=[".value", "num"], names_sep=["_"])
def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
def test_values_to_wrong_type(df_checks):
"""Raise TypeError if the wrong type is provided for `values_to`."""
with pytest.raises(TypeError, match="values_to should be one of.+"):
df_checks.pivot_longer(values_to={"salvo"})
def test_values_to_wrong_type_names_pattern(df_checks):
"""
Raise TypeError if `values_to` is a list,
and names_pattern is not.
"""
with pytest.raises(
TypeError,
match="values_to can be a list/tuple only "
"if names_pattern is a list/tuple.",
):
df_checks.pivot_longer(values_to=["salvo"])
def test_values_to_names_pattern_unequal_length(df_checks):
"""
Raise ValueError if `values_to` is a list,
and the length of names_pattern
does not match the length of values_to.
"""
with pytest.raises(
ValueError,
match="The length of values_to does not match "
"the number of regexes in names_pattern.+",
):
df_checks.pivot_longer(
values_to=["salvo"],
names_pattern=["ht", r"\d"],
names_to=["foo", "bar"],
)
def test_values_to_names_seq_names_to(df_checks):
"""
Raise ValueError if `values_to` is a list,
and intersects with names_to.
"""
with pytest.raises(
ValueError, match="salvo in values_to already exists in names_to."
):
df_checks.pivot_longer(
values_to=["salvo"], names_pattern=["ht"], names_to="salvo"
)
def test_sub_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains non strings."""
with pytest.raises(TypeError, match="1 in values_to.+"):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=[1, "salvo"],
)
def test_duplicate_values_to(df_checks):
"""Raise error if values_to is a sequence, and contains duplicates."""
with pytest.raises(ValueError, match="salvo is duplicated in values_to."):
df_checks.pivot_longer(
names_to=["x", "y"],
names_pattern=[r"ht", r"\d"],
values_to=["salvo", "salvo"],
)
def test_values_to_exists_in_columns(df_checks):
"""
Raise ValueError if values_to already
exists in the dataframe's columns.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(index="birth", values_to="birth")
def test_values_to_exists_in_names_to(df_checks):
"""
Raise ValueError if values_to is in names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(values_to="num", names_to="num")
def test_column_multiindex_names_sep(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_sep is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
column_names=[("names", "aa")],
names_sep="_",
names_to=["names", "others"],
)
def test_column_multiindex_names_pattern(df_multi):
"""
Raise ValueError if the dataframe's column is a MultiIndex,
and names_pattern is present.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(
index=[("name", "a")],
names_pattern=r"(.+)(.+)",
names_to=["names", "others"],
)
def test_index_tuple_multiindex(df_multi):
"""
Raise ValueError if index is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(index=("name", "a"))
def test_column_names_tuple_multiindex(df_multi):
"""
Raise ValueError if column_names is a tuple,
instead of a list of tuples,
and the dataframe's column is a MultiIndex.
"""
with pytest.raises(ValueError):
df_multi.pivot_longer(column_names=("names", "aa"))
def test_sort_by_appearance(df_checks):
"""Raise error if sort_by_appearance is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"],
names_sep="_",
sort_by_appearance="TRUE",
)
def test_ignore_index(df_checks):
"""Raise error if ignore_index is not boolean."""
with pytest.raises(TypeError):
df_checks.pivot_longer(
names_to=[".value", "value"], names_sep="_", ignore_index="TRUE"
)
def test_names_to_index(df_checks):
"""
Raise ValueError if there is no names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to="famid",
index="famid",
)
def test_names_sep_pattern_names_to_index(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value not in names_to and names_to intersects with index.
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist as column labels.+",
):
df_checks.pivot_longer(
names_to=["dim", "famid"],
names_sep="_",
index="famid",
)
def test_dot_value_names_to_columns_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the new columns
"""
with pytest.raises(
ValueError,
match=r".+in names_to already exist in the new dataframe\'s columns.+",
):
df_checks.pivot_longer(
index="famid", names_to=(".value", "ht"), names_pattern="(.+)(.)"
)
def test_values_to_seq_index_intersect(df_checks):
"""
Raise ValueError if values_to is a sequence,
and intersects with the index
"""
match = ".+values_to already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(ValueError, match=rf"{match}"):
df_checks.pivot_longer(
index="famid",
names_to=("value", "ht"),
names_pattern=["ht", r"\d"],
values_to=("famid", "foo"),
)
def test_dot_value_names_to_index_intersect(df_checks):
"""
Raise ValueError if names_sep/names_pattern,
.value in names_to,
and names_to intersects with the index
"""
match = ".+already exist as column labels assigned "
match = match + "to the dataframe's index parameter.+"
with pytest.raises(
ValueError,
match=rf"{match}",
):
df_checks.rename(columns={"famid": "ht"}).pivot_longer(
index="ht", names_to=(".value", "num"), names_pattern="(.+)(.)"
)
def test_names_pattern_list_empty_any(df_checks):
"""
Raise ValueError if names_pattern is a list,
and not all matches are returned.
"""
with pytest.raises(
ValueError, match="No match was returned for the regex.+"
):
df_checks.pivot_longer(
index=["famid", "birth"],
names_to=["ht"],
names_pattern=["rar"],
)
def test_names_pattern_no_match(df_checks):
"""Raise error if names_pattern is a regex and returns no matches."""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(rar)(.)",
)
def test_names_pattern_incomplete_match(df_checks):
"""
Raise error if names_pattern is a regex
and returns incomplete matches.
"""
with pytest.raises(
ValueError, match="Column labels .+ could not be matched with any .+"
):
df_checks.pivot_longer(
index="famid",
names_to=[".value", "value"],
names_pattern=r"(ht)(.)",
)
def test_names_sep_len(df_checks):
"""
Raise error if names_sep,
and the number of matches returned
is not equal to the length of names_to.
"""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=".value", names_sep="(\\d)")
def test_pivot_index_only(df_checks):
"""Test output if only index is passed."""
result = df_checks.pivot_longer(
index=["famid", "birth"],
names_to="dim",
values_to="num",
)
actual = df_checks.melt(
["famid", "birth"], var_name="dim", value_name="num"
)
assert_frame_equal(result, actual)
def test_pivot_column_only(df_checks):
"""Test output if only column_names is passed."""
result = df_checks.pivot_longer(
column_names=["ht1", "ht2"],
names_to="dim",
values_to="num",
ignore_index=False,
)
actual = df_checks.melt(
["famid", "birth"],
var_name="dim",
value_name="num",
ignore_index=False,
)
| assert_frame_equal(result, actual) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
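# Illustrative calls (assumed, mirroring how the test classes below use this helper):
#   assert_stat_op_calc('sum', np.sum, float_frame)
#   assert_stat_op_calc('mean', np.mean, float_frame, check_dates=True)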
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
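# Illustrative call (assumed):
#   assert_stat_op_api('sum', float_frame, float_string_frame, has_numeric_only=True)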
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
bool_frame_with_na : DataFrame
DataFrame of booleans with some NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when the dtypes of the pandas Series differ,
# the ndarray will have dtype=object,
# so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
import math
import re
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
import io
from IPython.display import display, HTML, Markdown, SVG
from datetime import timedelta
import scipy
import scipy.signal
plt.style.use('seaborn-darkgrid')
plt.rcParams['figure.dpi'] = 300
plt.rcParams['font.size'] = 10 # controls default text sizes
plt.rcParams['axes.titlesize'] = 'small' # fontsize of the axes title
plt.rcParams['axes.labelsize'] = 'small' # fontsize of the x and y labels
plt.rcParams['xtick.labelsize'] = 'x-small' # fontsize of the tick labels
plt.rcParams['ytick.labelsize'] = 'x-small' # fontsize of the tick labels
plt.rcParams['legend.fontsize'] = 'x-small' # legend fontsize
plt.rcParams['figure.titlesize'] = 'small' # fontsize of the figure title
plt.rcParams['axes.axisbelow'] = True # axis behind the graphs
def get_admission(admissionid, con):
sql_admissions_diagnoses = """
WITH diagnosis_groups AS (
SELECT admissionid,
item,
value as diagnosis_group,
CASE
WHEN itemid = 13110 AND valueid BETWEEN 1 AND 3 THEN 1 --D_Hoofdgroep
WHEN itemid = 16651 AND valueid BETWEEN 7 AND 9 THEN 1 --DMC_Hoofdgroep
WHEN itemid = 16997 AND valueid BETWEEN 11 AND 20 THEN 1 --APACHE IV Groepen
WHEN itemid = 18588 AND valueid BETWEEN 1 AND 7 THEN 1 --Apache II Hoofdgroep
ELSE 0
END AS surgical,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
--MAIN GROUP - LEVEL 0
13110, --D_Hoofdgroep
16651 --DMC_Hoofdgroep, Medium Care
)
),diagnosis_subgroups AS (
SELECT admissionid,
item,
value as diagnosis_subgroup,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
--SUB GROUP - LEVEL 1
13111, --D_Subgroep_Thoraxchirurgie
16669, --DMC_Subgroep_Thoraxchirurgie
13112, --D_Subgroep_Algemene chirurgie
16665, --DMC_Subgroep_Algemene chirurgie
13113, --D_Subgroep_Neurochirurgie
16667, --DMC_Subgroep_Neurochirurgie
13114, --D_Subgroep_Neurologie
16668, --DMC_Subgroep_Neurologie
13115, --D_Subgroep_Interne geneeskunde
16666 --DMC_Subgroep_Interne geneeskunde
)
), diagnoses AS (
SELECT admissionid,
item,
value as diagnosis,
ROW_NUMBER() OVER(PARTITION BY admissionid
ORDER BY measuredat DESC) AS rownum
FROM listitems
WHERE itemid IN (
-- Diagnosis - LEVEL 2
--SURGICAL
13116, --D_Thoraxchirurgie_CABG en Klepchirurgie
16671, --DMC_Thoraxchirurgie_CABG en Klepchirurgie
13117, --D_Thoraxchirurgie_Cardio anders
16672, --DMC_Thoraxchirurgie_Cardio anders
13118, --D_Thoraxchirurgie_Aorta chirurgie
16670, --DMC_Thoraxchirurgie_Aorta chirurgie
13119, --D_Thoraxchirurgie_Pulmonale chirurgie
16673, --DMC_Thoraxchirurgie_Pulmonale chirurgie
13141, --D_Algemene chirurgie_Algemeen
16642, --DMC_Algemene chirurgie_Algemeen
13121, --D_Algemene chirurgie_Buikchirurgie
16643, --DMC_Algemene chirurgie_Buikchirurgie
13123, --D_Algemene chirurgie_Endocrinologische chirurgie
16644, --DMC_Algemene chirurgie_Endocrinologische chirurgie
13145, --D_Algemene chirurgie_KNO/Overige
16645, --DMC_Algemene chirurgie_KNO/Overige
13125, --D_Algemene chirurgie_Orthopedische chirurgie
16646, --DMC_Algemene chirurgie_Orthopedische chirurgie
13122, --D_Algemene chirurgie_Transplantatie chirurgie
16647, --DMC_Algemene chirurgie_Transplantatie chirurgie
13124, --D_Algemene chirurgie_Trauma
16648, --DMC_Algemene chirurgie_Trauma
13126, --D_Algemene chirurgie_Urogenitaal
16649, --DMC_Algemene chirurgie_Urogenitaal
13120, --D_Algemene chirurgie_Vaatchirurgie
16650, --DMC_Algemene chirurgie_Vaatchirurgie
13128, --D_Neurochirurgie _Vasculair chirurgisch
16661, --DMC_Neurochirurgie _Vasculair chirurgisch
13129, --D_Neurochirurgie _Tumor chirurgie
16660, --DMC_Neurochirurgie _Tumor chirurgie
13130, --D_Neurochirurgie_Overige
16662, --DMC_Neurochirurgie_Overige
--MEDICAL
13133, --D_Interne Geneeskunde_Cardiovasculair
16653, --DMC_Interne Geneeskunde_Cardiovasculair
13134, --D_Interne Geneeskunde_Pulmonaal
16658, --DMC_Interne Geneeskunde_Pulmonaal
13135, --D_Interne Geneeskunde_Abdominaal
16652, --DMC_Interne Geneeskunde_Abdominaal
13136, --D_Interne Geneeskunde_Infectieziekten
16655, --DMC_Interne Geneeskunde_Infectieziekten
13137, --D_Interne Geneeskunde_Metabool
16656, --DMC_Interne Geneeskunde_Metabool
13138, --D_Interne Geneeskunde_Renaal
16659, --DMC_Interne Geneeskunde_Renaal
13139, --D_Interne Geneeskunde_Hematologisch
16654, --DMC_Interne Geneeskunde_Hematologisch
13140, --D_Interne Geneeskunde_Overige
16657, --DMC_Interne Geneeskunde_Overige
13131, --D_Neurologie_Vasculair neurologisch
16664, --DMC_Neurologie_Vasculair neurologisch
13132, --D_Neurologie_Overige
16663, --DMC_Neurologie_Overige
13127 --D_KNO/Overige
)
),
interventions AS (
SELECT
admissionid,
CASE
WHEN SUM((item ILIKE \'%ECMO%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS ecmo,
CASE
WHEN SUM((item ILIKE \'%CVVH%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS crrt,
CASE
WHEN SUM((item ILIKE \'%SWAN%\')::INT) > 0 THEN TRUE
ELSE FALSE
END AS pac
FROM processitems
GROUP BY admissionid
)
SELECT
a.admissionid,
a.admissioncount,
a.location,
a.urgency,
a.origin,
a.admittedat,
a.admissionyeargroup,
a.dischargedat,
a.lengthofstay,
a.destination,
a.gender,
a.agegroup,
a.dateofdeath,
a.weightgroup,
a.heightgroup,
a.specialty,
diagnoses.diagnosis,
diagnosis_subgroups.diagnosis_subgroup,
diagnosis_groups.diagnosis_group,
i.pac,
i.ecmo,
i.crrt
FROM admissions a
LEFT JOIN diagnoses ON a.admissionid = diagnoses.admissionid
LEFT JOIN diagnosis_subgroups ON a.admissionid = diagnosis_subgroups.admissionid
LEFT JOIN diagnosis_groups ON a.admissionid = diagnosis_groups.admissionid
LEFT JOIN interventions i ON a.admissionid = i.admissionid
WHERE
(diagnoses.rownum = 1 OR diagnoses.rownum IS NULL)
AND (diagnosis_subgroups.rownum = 1 OR diagnosis_subgroups.rownum IS NULL)
AND (diagnosis_groups.rownum = 1 OR diagnosis_groups.rownum IS NULL) --only last updated record
AND a.admissionid = {admissionid}
"""
return pd.read_sql(sql_admissions_diagnoses.format(admissionid=admissionid), con)
def get_admissiondiags(con):
#gets all admissions
return get_admission("a.admissionid", con)
# small utility function to 'apply' a new index to a (grouped) dataframe
def reindex_by_date(df, index_start, index_end, interval, method=None, limit=None, fill_value=0):
new_index = pd.date_range(index_start, index_end, freq=interval)
return df.reindex(new_index, method=method, limit=limit, fill_value=fill_value)
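# Illustrative usage (assumed: each group is indexed by timestamp), e.g. snapping
# per-item measurements onto a fixed hourly grid and filling gaps with 0:
#   df.groupby('itemid').apply(reindex_by_date, index_start=start, index_end=end,
#                              interval='1H')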
def add_section_box(axis, title):
# adds section headers
x = -0.5
y = 0
width = 0.075
height = 1
rect = Rectangle((x, y), width, height, transform=axis.transAxes, clip_on=False)
axis.add_patch(rect)
axis.text(s=title, x=(x + width / 2), y=height / 2, rotation=90, transform=axis.transAxes,
color='white', fontweight='bold', horizontalalignment='center', verticalalignment='center')
def twinax_match_ticks(ax, twin):
# get info of primary axis
ylim1 = ax.get_ylim()
len1 = ylim1[1] - ylim1[0]
yticks1 = ax.get_yticks()
# gets the distances between ticks for the primary axis
rel_dist = [(y - ylim1[0]) / len1 for y in yticks1]
# gets info of secondary axis
ylim2 = twin.get_ylim()
len2 = ylim2[1] - ylim2[0]
# sets the same distances for the ticks of the twin axis
yticks2 = [ry * len2 + ylim2[0] for ry in rel_dist]
# change the ticks of the twin axis (secondary y-axis)
twin.set_yticks(yticks2) # changes the ticks
twin.set_ylim(ylim2) # restores the previous limits
twin.grid(False) # hides the unnecessary gridlines
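# In short: tick positions of the primary axis are expressed as fractions of its
# y-range and re-projected onto the twin axis' range, so both axes share the same
# horizontal gridlines without altering either axis' data limits.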
def show_chart(admissionid, con):
admission = get_admission(admissionid, con)
admittedat = admission['admittedat'].values[0]
# size of the medication action icons relative to the infusion bar
scale_factor = 0.3
los = admission['lengthofstay'].values[0]
los_rounded = math.ceil(los / 12) * 12
if los_rounded < 7*24:
fluids_interval = timedelta(hours=8)
else:
fluids_interval = timedelta(hours=24)
flowsheet_interval = timedelta(hours=int(los_rounded/5))
signals_interval = timedelta(minutes=10)
drugs_info_interval = timedelta(hours=int(los_rounded/12))
# defines all subplots here for sharing the x-axis among them
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1, sharex=True, figsize=(8, 12), constrained_layout=True,
gridspec_kw={'height_ratios': [1.25, 0.75, 1, 1.25, 1, 1]})
# date format for x-axes
ax1.xaxis_date()
ax2.xaxis_date()
ax3.xaxis_date()
ax4.xaxis_date()
ax5.xaxis_date()
ax6.xaxis_date()
#######################################
# Vitals
#######################################
ax1_twin = ax1.twinx() # second axis sharing x-axis for SpO2
ax2_twin = ax2.twinx() # second axis sharing x-axis for SvO2
ax2_twin_right = ax2.twinx() # second axis for temperatures
sql_signals = """
SELECT
n.measuredat AS time,
n.value,
n.itemid,
n.item,
n.unit
FROM numericitems n
LEFT JOIN admissions a ON
n.admissionid = a.admissionid
WHERE
n.admissionid = {admissionid}
AND n.measuredat >= a.admittedat AND n.measuredat <= a.dischargedat
--vital signs
AND (
n.itemid IN (
--vitals
6640, --Heart rate
6642, -- mABP
6641, -- sABP
6643, -- dABP
6709, -- SpO2
--circulation
6656, --Cardiac Output
10053, --Lactaat (bloed)
--temperatures
8658, --Temp Bloed
8659, --Temperatuur Perifeer 2
8662, --Temperatuur Perifeer 1
13058, --Temp Rectaal
13059, --Temp Lies
13060, --Temp Axillair
13061, --Temp Oraal
13062, --Temp Oor
13063, --Temp Huid
13952, --Temp Blaas
16110 --Temp Oesophagus
)
)
UNION
--query for getting (mixed/central) venous samples
SELECT
n.measuredat AS time,
CASE
WHEN n.itemid = 12311 THEN
CASE
WHEN n.value <= 1 THEN n.value*100
ELSE n.value
END
ELSE n.value
END AS value,
n.itemid,
CASE
WHEN n.itemid = 12311 THEN 'SvO2'
ELSE n.item
END AS item,
n.unit
FROM numericitems n
LEFT JOIN freetextitems f ON
n.admissionid = f.admissionid
AND n.measuredat = f.measuredat
AND f.itemid = 11646 --Afname (bloed): source of specimen
LEFT JOIN admissions a ON
n.admissionid = a.admissionid
WHERE
n.admissionid = {admissionid}
AND n.measuredat >= a.admittedat AND n.measuredat <= a.dischargedat
AND n.itemid IN (
12311 --Saturatie (bloed)
)
AND LOWER(f.value) LIKE '%ven%' -- source is (mixed) venous
ORDER BY time
""".format(admissionid=admissionid)
signals = pd.read_sql(sql_signals, con)
signals['time'] = pd.to_datetime(signals['time'], unit='ms')
# downsample to 1-hour for readability of device data
signals = signals.set_index(['time', 'item', 'itemid', 'unit']).groupby(
['item', 'itemid', 'unit']).resample('1H', level=0).mean().reset_index().dropna()
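# The set_index/groupby/resample('1H')/mean chain keeps one averaged row per
# (item, itemid, unit) per hour; hours without measurements become NaN bins and are
# removed by the trailing dropna().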
# reasonable physiological limits for axes
ax1.set_ylim(top=300, bottom=0) # vitals
ax1_twin.set_ylim(top=100, bottom=40) # oxygenation
ax2.set_ylim(top=10, bottom=0) # circulation
ax2_twin.set_ylim(top=100, bottom=0) # SvO2
ax2_twin_right.set_ylim(top=42, bottom=32) # temperature
# formatting of the twin axes
ax1_twin.tick_params('y', colors='c') # cyan tick labels
ax2_twin.spines["left"].set_position(("axes", 0)) #
ax2_twin.yaxis.set_label_position('left')
ax2_twin.yaxis.set_ticks_position('left')
ax2_twin.spines["left"].set_visible(True)
ax2_twin.tick_params('y', direction='in', pad=-2, colors='b') # blue tick labels
plt.setp(ax2_twin.get_yticklabels(), ha="left")
ax2_twin_right.tick_params('y', colors='r') # red tick labels
signalids = [
##itemid, color, z_order, axis, fill_between, id/value, id or value
(6641, 'v-r', 10, ax1, 'no_fill'), # sbp
(6642, '.-r', 11, ax1, 'no_fill'), # mabp
(6643, '^-r', 12, ax1, 'no_fill'), # dabp
(6641, 'r', 13, ax1, 'fill_between', 'fill_id', 6643), # fill in between SBP and DABP
(6640, 'x-g', 14, ax1, 'no_fill'), # heart rate
(6709, 'c', 15, ax1_twin, 'no_fill'), # SpO2
(6709, 'c', 16, ax1_twin, 'fill_between', 'fill_value', 100), # SpO2 fill between 100
(10053, 'o-m', 20, ax2, 'no_fill'), # lactate
(10053, 'm', 20, ax2, 'fill_between', 'fill_value', 0), # lactate fill between 0
(6656, 'o-y', 21, ax2, 'no_fill'), # cardiac output
(6656, 'y', 21, ax2, 'fill_between', 'fill_value', 0), # cardiac output fill between 0
(12311, 'o--b', 20, ax2_twin, 'no_fill'), # venous saturation
(12311, 'b', 21, ax2_twin, 'fill_between', 'fill_value', 100), # SvO2 fill between 100
(8658, '-r', 20, ax2_twin_right, 'no_fill'), # Temp Bloed
(8659, '--r', 20, ax2_twin_right, 'no_fill'), # Temperatuur Perifeer 2
(8662, '--r', 20, ax2_twin_right, 'no_fill'), # Temperatuur Perifeer 1
(13058, ':r', 20, ax2_twin_right, 'no_fill'), # Temp Rectaal
(13059, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Lies
(13060, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Axillair
(13061, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Oraal
(13062, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Oor
(13063, '-.r', 20, ax2_twin_right, 'no_fill'), # Temp Huid
(13952, '--r', 20, ax2_twin_right, 'no_fill'), # Temp Blaas
(16110, ':r', 20, ax2_twin_right, 'no_fill') # Temp Oesophagus
]
# English translations
signal_labels = {
6641: 'ABP systolic',
6642: 'ABP mean',
6643: 'ABP diastolic',
6640: 'Heart rate',
6709: 'SpO2',
10053: 'Lactate',
6656: 'Cardiac output',
12311: 'SvO2',
8658: 'Temperature blood',
8659: 'Temperature peripheral 2',
8662: 'Temperature peripheral 1',
13058: 'Temperature rectal',
13059: 'Temperature inguinal',
13060: 'Temperature axillary',
13061: 'Temperature oral',
13062: 'Temperature tympanic',
13063: 'Temperature skin',
13952: 'Temperature bladder',
16110: 'Temperature esophagus'
}
for s in signalids:
ax = s[3] # axis
signal = signals[signals['itemid'] == s[0]]
if len(signal) == 0:
continue
if not s[4] == 'fill_between': # regular plot (not fill between)
ax.plot(signal['time'],
signal['value'],
s[1], # fmt = '[marker][line][color]',
markersize=0, # hide for readability
label=signal_labels[s[0]],
zorder=s[2])
else: # fill between
if s[5] == 'fill_id':
other_signal = signals[signals['itemid'] == s[6]]
if len(other_signal) > len(signal):
signal = signal.reindex(other_signal.index, method='nearest')
elif len(signal) > len(other_signal):
other_signal = other_signal.reindex(signal.index, method='nearest')
other_signal_values = other_signal['value']
else:
other_signal_values = s[6]
ax.fill_between(signal['time'],
other_signal_values,
y2=signal['value'],
facecolor=s[1],
alpha=0.1,
zorder=s[2])
# create the legends outside the axes
ax1.legend(bbox_to_anchor=(-.1, 1), loc='upper right', borderaxespad=0, markerfirst=False)
ax1_twin.legend(bbox_to_anchor=(1.1, 1), loc='upper left', borderaxespad=0)
ax2.legend(bbox_to_anchor=(-.1, 1), loc='upper right', borderaxespad=0, markerfirst=False)
ax2_twin.legend(bbox_to_anchor=(-.1, 0), loc='lower right', borderaxespad=0, markerfirst=False)
plt.setp(ax2_twin.get_legend().get_texts(), color='b')
ax2_twin_right.legend(bbox_to_anchor=(1.1, 1), loc='upper left', borderaxespad=0)
# create a banner
x = -0.5
y = 1.05
width = 2
height = 0.2
admissionyeargroup = admission['admissionyeargroup'].values[0]
agegroup = admission['agegroup'].values[0]
gender = admission['gender'].values[0]
diagnosis = admission['diagnosis'].values[0]
# translation
if diagnosis == 'Na reanimatie':
diagnosis = 'Post CPR'
if gender == 'Man':
gender = 'Male'
elif gender == 'Vrouw':
gender = 'Female'
title = 'AmsterdamUMCdb admissionid: {} ({} - {} y - {})\nDiagnosis: {}'.format(admissionid, admissionyeargroup,
agegroup, gender, diagnosis)
rect = Rectangle((x, y), width, height, transform=ax1.transAxes, clip_on=False)
ax1.add_patch(rect)
ax1.text(s=title, x=0.5, y=(y + height / 2), rotation=0, transform=ax1.transAxes,
color='white', fontweight='bold', horizontalalignment='center', verticalalignment='center')
##############################################
# FLOWSHEET STYLE DATA
##############################################
sql_flowsheet = """
WITH gcs_components AS (
SELECT
eyes.admissionid,
CASE eyes.itemid
WHEN 6732 THEN 5 - eyes.valueid --Actief openen van de ogen
END AS eyes_score,
CASE motor.itemid
WHEN 6734 THEN 7 - motor.valueid --Beste motore reactie van de armen
END AS motor_score,
CASE verbal.itemid
WHEN 6735 THEN 6 - verbal.valueid --Beste verbale reactie
END AS verbal_score,
eyes.registeredby,
eyes.measuredat AS time
FROM listitems eyes
LEFT JOIN listitems motor ON
eyes.admissionid = motor.admissionid AND
eyes.measuredat = motor.measuredat AND
motor.itemid IN (
6734 --Beste motore reactie van de armen
)
LEFT JOIN listitems verbal ON
eyes.admissionid = verbal.admissionid AND
eyes.measuredat = verbal.measuredat AND
verbal.itemid IN (
6735 --Beste verbale reactie
)
WHERE
eyes.itemid IN (
6732 --Actief openen van de ogen
)
AND eyes.registeredby IN (
'ICV_IC-Verpleegkundig',
'ICV_MC-Verpleegkundig'
)
AND eyes.admissionid = {admissionid}
)
SELECT
time,
'GCS score' AS item,
'E' || eyes_score || 'M' || motor_score || 'V' || (
CASE
WHEN verbal_score < 1 THEN 1
ELSE verbal_score
END)
--|| '=' || (
-- eyes_score + motor_score + (
-- CASE
-- WHEN verbal_score < 1 THEN 1
-- ELSE verbal_score
-- END
-- )
--)
AS value,
eyes_score + motor_score + (
CASE
WHEN verbal_score < 1 THEN 1
ELSE verbal_score
END
)
AS valueid,
'00. Glasgow Coma Scale' AS category
FROM gcs_components
UNION
SELECT
measuredat AS time,
item,
value,
valueid,
CASE
WHEN itemid IN (
9534, --Type beademing Evita 1
6685, --Type Beademing Evita 4
12290 --Ventilatie Mode (Set)
) THEN '02-1. Respiratory support'
WHEN itemid IN (
8189 --Toedieningsweg
) THEN '03-1. Oxygen delivery device'
WHEN itemid IN (
6671 --Hartritme
) THEN '01. Heart rhythm'
END AS category
FROM listitems
WHERE
itemid IN (
9534, --Type beademing Evita 1
6685, --Type Beademing Evita 4
12290, --Ventilatie Mode (Set)
8189, --Toedieningsweg
6671 --Hartritme
)
AND admissionid = {admissionid}
UNION
SELECT
measuredat AS time,
item,
CAST(value AS varchar),
0 AS valueid, --to allow UNION both tables
CASE
WHEN itemid IN (
6699, --FiO2 %: setting on Evita ventilator
12279, --O2 concentratie --measurement by Servo-i/Servo-U ventilator
12369 --SET %O2: used with BiPap Vision ventilator
) THEN '02-2. FiO2'
WHEN itemid IN (
-- Peak pressures on ventilator
8852, --P max
8877, --Peak druk -- Evita
12281, --Piek druk --Servo-i
16239 --Zephyros Ppeak
) THEN '02-3. P peak'
WHEN itemid IN (
--PEEP settings on respiratory support
12284, --PEEP (Set): setting on Servo-i ventilator
8862, --PEEP/CPAP: setting on Evita ventilator
--8879, --PEEP (gemeten): measured by Evita ventilator
16250 --Zephyros PEEP
) THEN '02-4. PEEP'
WHEN itemid IN (
--Oxygen flow
8845, -- O2 l/min
10387, --Zuurstof toediening (bloed)
18587 --Zuurstof toediening
) THEN '03-2. Oxygen flow'
END AS category
FROM numericitems
WHERE
itemid IN (
-- FiO2
6699, --FiO2 %: setting on Evita ventilator
12279, --O2 concentratie --measurement by Servo-i/Servo-U ventilator
12369, --SET %O2: used with BiPap Vision ventilator
-- Peak pressures on ventilator
8852, --P max
8877, --Peak druk -- Evita
12281, --Piek druk --Servo-i
16239, --Zephyros Ppeak
--PEEP settings on respiratory support
12284, --PEEP (Set): setting on Servo-i ventilator
8862, --PEEP/CPAP: setting on Evita ventilator
--8879, --PEEP (gemeten): measured by Evita ventilator
16250, --Zephyros PEEP
--Oxygen flow
8845, -- O2 l/min
10387, --Zuurstof toediening (bloed)
18587 --Zuurstof toediening
)
AND admissionid = {admissionid}
""".format(admissionid=admissionid)
flowsheet = pd.read_sql(sql_flowsheet, con)
flowsheet['time'] = pd.to_datetime(flowsheet['time'], unit='ms')
# downsample to hourly, re-index to create the same number of rows per category and then downsample to the requested interval
flowsheet_resampled_hourly = flowsheet.set_index(['time', 'category']).sort_values(['valueid'],
ascending=False).groupby(
['category']).resample(timedelta(hours=1), level=0).first()
index_start = flowsheet_resampled_hourly.index.get_level_values('time').min()
index_end = flowsheet_resampled_hourly.index.get_level_values('time').max()
flowsheet_reindexed_hourly = flowsheet_resampled_hourly.reset_index().set_index('time').groupby(
['category']).apply(
reindex_by_date, index_start=index_start, index_end=index_end, interval=timedelta(hours=1),
method=None, fill_value=np.NaN)[['value', 'valueid']]
flowsheet_reindexed_hourly.index.set_names('time', level=1, inplace=True)
flowsheet_resampled = flowsheet_reindexed_hourly.sort_values(['valueid'], ascending=False).groupby(
['category']).resample(flowsheet_interval, level=1).first()
# translate some values:
flowsheet_resampled.loc[flowsheet_resampled['value'] == 'Kunstneus', 'value'] = 'HME'
flowsheet_resampled.loc[flowsheet_resampled['value'] == 'O2-bril', 'value'] = 'Prongs'
labels = []
ticks = []
pos = 0
# display flowsheet
flowsheet_groups = flowsheet_resampled.fillna('').reset_index().groupby(['category'])
for name, group in flowsheet_groups:
label = re.sub(r'[0-9\-].+\.\s', '', name)
labels.append(label) # saves the label for ticks
ticks.append(pos) ##saves the position for ticks
for index, row in group.iterrows():
ax3.barh(pos, flowsheet_interval, left=row['time'], height=4, facecolor='white', alpha=0.0,
edgecolor='white', linewidth=1)
ax3.annotate(row['value'], xy=(row['time'] + flowsheet_interval / 2, pos), fontsize='x-small',
color='black', horizontalalignment='center', verticalalignment='center')
ax3.axhline(y=pos + 2, ls='-', color='white') # horizontal gridline
pos = pos - 4
ax3.axhline(y=pos + 2, ls='-', color='white')
# shows the labels and a flowsheet grid
ax3.set_yticks(ticks)
ax3.set_yticklabels(labels)
ax3.grid(False, which='major', axis='y')
##############################################
# CONTINUOUS INFUSIONS
##############################################
sql_drugitems_continuous = """
SELECT
ordercategoryid,
ordercategory,
itemid,
item,
CASE
WHEN rate >= 0 THEN rate
ELSE dose
END AS rate,
start AS time,
stop - start AS duration,
action
FROM drugitems
WHERE
iscontinuous = B'1'
AND NOT itemid IN (
--from ordercategoryid 65 (syringe pumps)
9424, --NaCL 0,9% spuit
19129, --Insuline aspart (Novorapid)
9001, --Kaliumchloride (KCL)
18783, --Calciumgluconaat 10%
--other
7257, --Glucose 5 %
7291, --NaCl 0,45%/Glucose 2,5%
7293, --NaCl 0,9 %
7316, --Ri-Lac (Ringers lactaat)
8937, --Drukzak
8939, --Medicijnlijn medicatie
12610, --Nutrison Sterilized water
16904 --Drukzak IABP
)
AND admissionid = {admissionid}
ORDER BY itemid, start
""".format(admissionid=admissionid)
drugitems_continuous = pd.read_sql(sql_drugitems_continuous, con)
drugitems_continuous['time'] = pd.to_datetime(drugitems_continuous['time'], unit='ms')
drugitems_continuous['duration'] = pd.to_timedelta(drugitems_continuous['duration'], unit='ms')
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# Global variables (shared store used by the parser classes below)
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
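# Minimal usage sketch of the glv store (illustrative values only):
#   glv._init()
#   glv.set_value('data_f', 'aa55')
#   glv.get_value('data_f')          # -> 'aa55'
#   glv.get_value('missing_key', 0)  # -> 0, the default returned on a KeyError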
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
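# The field widths in fun_01to06 are byte counts: [2, 1, 1, 17, 1, 2] covers the start
# symbol, command identifier, response flag, 17-byte unique identifier (typically the VIN),
# data-unit encryption method and data-unit length. Assuming hexlist2() from public_fun
# turns byte widths into cumulative hex-character offsets, the header slices of the
# incoming hex string would be [0:4], [4:6], [6:8], [8:42], [42:44] and [44:48].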
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
            print('Invalid command identifier:', self.mo)
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True)
self.pj = dict(self.o.pj, **self.c.pj)
self.pj2 = {'数据单元':self.pj}
self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True)
print('fun_07 done!')
## fun_07_01
class fun_07_01(object):
def __init__(self, data):
self.cf = [6, 2, 20, 1, 1]
self.cf_a = hexlist2(self.cf)
self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]])
self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]])
self.cf.append(self.n*self.m)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"数据采集时间",
"登入流水号",
"ICCID",
"可充电储能子系统数",
"可充电储能系统编码长度",
"可充电储能系统编码",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆登入': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"数据采集时间":get_datetime(self.oj['数据采集时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"ICCID":hex2str(self.oj['ICCID']),
"可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']),
"可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']),
"可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']),
}
self.pj2 = {'车辆登入': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_01', self.o)
print('fun_07_01 done!')
def fun_07_01_06(data, n, m):
if m=='00':
return "NA"
else :
n = hex2dec(n)
m = hex2dec(m) * 2
output = []
for i in range(n):
output_unit = hex2str(data[i * m: i* m +m])
output.append(output_unit)
return output
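# Worked example for fun_07_01_06, assuming hex2str() from public_fun decodes hex-encoded
# ASCII: with n='02' subsystems and m='03' bytes per code, the payload '414243444546'
# would be split into ['ABC', 'DEF'].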
## fun_07_04
class fun_07_04(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_04', self.o)
print('fun_07_04 done!')
## fun_07_05
class fun_07_05(object):
def __init__(self, data):
self.cf = [6, 2, 12, 20, 1]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"平台登入时间",
"登入流水号",
"平台用户名",
"平台密码",
"加密规则",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"平台登入时间":get_datetime(self.oj['平台登入时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"平台用户名":hex2str(self.oj['平台用户名']),
"平台密码":hex2str(self.oj['平台密码']),
"加密规则":dict_list_replace('07_05_05',self.oj['加密规则']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_05', self.o)
print('fun_07_05 done!')
## fun_07_06
class fun_07_06(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
print(self.oj)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_06', self.o)
print('fun_07_06 done!')
## fun_07_02
class fun_07_02:
def __init__(self, data):
self.o = data
self.oj = {'数据采集时间': self.o[:12]}
self.ol = pd.DataFrame({'01':['01']})
self.pj = {'数据采集时间': get_datetime(self.oj['数据采集时间'])}
self.pl = pd.DataFrame({'01':['01']})
glv.set_value('data_f', data[12:])
glv.set_value('m_07_02', data[12:14])
self.mo_list = glv.get_value('model')
self.do_list = []
while(glv.get_value('m_07_02') in self.mo_list):
            # record the message types that have already been processed
self.do_list.append(glv.get_value('m_07_02'))
            # remove the processed message type from the pending list
self.mo_list.remove(glv.get_value('m_07_02'))
if glv.get_value('m_07_02') == '01':
self.f_01 = fun_07_02_01(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '02':
self.f_02 = fun_07_02_02(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '03':
self.f_03 = fun_07_02_03(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '04':
self.f_04 = fun_07_02_04(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '05':
self.f_05 = fun_07_02_05(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '06':
self.f_06 = fun_07_02_06(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '07':
self.f_07 = fun_07_02_07(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '08':
self.f_08 = fun_07_02_08(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '09':
self.f_09 = fun_07_02_09(glv.get_value('data_f'))
else:
print("fun_07_02 done")
print(glv.get_value('data_f'))
print(glv.get_value('m_07_02'))
self.do_list.sort()
for i in self.do_list:
if i == '01':
self.oj = dict(self.oj,**self.f_01.oj2)
self.ol = pd.merge(self.ol, self.f_01.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_01.pj2)
self.pl = pd.merge(self.pl, self.f_01.pl, left_index=True, right_index=True)
elif i == '02':
self.oj = dict(self.oj,**self.f_02.oj2)
self.ol = pd.merge(self.ol, self.f_02.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_02.pj2)
self.pl = pd.merge(self.pl, self.f_02.pl, left_index=True, right_index=True)
elif i == '03':
self.oj = dict(self.oj,**self.f_03.oj2)
self.ol = pd.merge(self.ol, self.f_03.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_03.pj2)
self.pl = pd.merge(self.pl, self.f_03.pl, left_index=True, right_index=True)
elif i == '04':
self.oj = dict(self.oj,**self.f_04.oj2)
self.ol = pd.merge(self.ol, self.f_04.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_04.pj2)
self.pl = pd.merge(self.pl, self.f_04.pl, left_index=True, right_index=True)
elif i == '05':
self.oj = dict(self.oj,**self.f_05.oj2)
self.ol = pd.merge(self.ol, self.f_05.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_05.pj2)
self.pl = pd.merge(self.pl, self.f_05.pl, left_index=True, right_index=True)
elif i == '06':
self.oj = dict(self.oj,**self.f_06.oj2)
self.ol = pd.merge(self.ol, self.f_06.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_06.pj2)
self.pl = pd.merge(self.pl, self.f_06.pl, left_index=True, right_index=True)
elif i == '07':
self.oj = dict(self.oj,**self.f_07.oj2)
self.ol = pd.merge(self.ol, self.f_07.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_07.pj2)
self.pl = pd.merge(self.pl, self.f_07.pl, left_index=True, right_index=True)
elif i == '08':
self.oj = dict(self.oj,**self.f_08.oj2)
self.ol = pd.merge(self.ol, self.f_08.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_08.pj2)
self.pl = pd.merge(self.pl, self.f_08.pl, left_index=True, right_index=True)
elif i == '09':
self.oj = dict(self.oj,**self.f_09.oj2)
                self.ol = pd.merge(self.ol, self.f_09.ol, left_index=True, right_index=True)
#!/usr/bin/env python
# Main script for foul ball risk analysis. Performs web scraping, data ingest
# data cleaning, summarization and statistical analyses.
import warnings
from bs4 import BeautifulSoup
import numpy as np
import nbinom_fit
import pandas as pd
import os
import subprocess
import argparse
import datetime
import calendar
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
sns.set()
data_dir = 'data'
pix_dir = 'pix'
results_file_name = os.path.join(data_dir, 'results.txt')
teams_file_name = os.path.join(data_dir, 'teams.csv')
extracted_file_name = os.path.join(data_dir, 'extracted_raw.csv')
mlb_stats_file_name = os.path.join(data_dir, 'basic_MLB_stats.csv')
merged_file_name = os.path.join(data_dir, 'merged.csv')
missing_game_file_name = os.path.join(data_dir, 'missing_game_estimates.csv')
missing_summary_file_name = os.path.join(data_dir, 'missing_summary.csv')
neg_binom_params_file_name = os.path.join(data_dir, 'neg_binom.csv')
pure_med_file_name = os.path.join(data_dir, 'pure_med_stats.csv')
injuries_file_name = os.path.join(data_dir, 'injuries.csv')
#####################
# Utility functions #
#####################
results_fp = None
skip_cache = False
def lprint(x, suppress_stdout=False):
if not suppress_stdout:
print(x)
global results_fp
if results_fp is None:
results_fp = open(results_file_name, 'w')
results_fp.write(str(datetime.datetime.now())+'\n')
results_fp.write('%s\n' % x)
def prettyprint(func):
def pp(*args, **kwargs):
lprint('')
lprint(60 * '#')
lprint('## %s %s' % (
func.__name__, datetime.datetime.now()))
lprint(60 * '#')
return(func(*args, **kwargs))
return(pp)
def check_cache(file_name_list):
if type(file_name_list) == str:
file_name_list = [file_name_list]
if skip_cache:
lprint('Skipping file cache for %s' % (', '.join(file_name_list)))
return(False)
lprint('Check if cached files exist: %s' % (', '.join(file_name_list)))
for file_name in file_name_list:
if not(os.path.exists(file_name)):
lprint('Cache files missing; processing.')
return(False)
lprint('Cache files present; skipping...')
return(True)
##################
# Data functions #
##################
def grab_teams():
if not os.path.exists(teams_file_name):
        print(' User must provide file %s' % teams_file_name)
print(' See README.md for a description and sample_teams.csv')
print(' for an example.')
raise ValueError('Missing teams.csv file')
teams_df = pd.read_csv(teams_file_name)
required_columns = ['team', 'team_full_name',
'excel_sheet_name', 'anonymized_name', 'event_type']
for c in required_columns:
if c not in teams_df.columns:
raise ValueError('teams.csv missing column %s' % c)
if len(teams_df) == 0:
raise ValueError('teams.csv has no rows!')
return(teams_df)
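# For reference, a minimal teams.csv could look like the sketch below; only the column
# names are required by grab_teams, the values here are purely illustrative:
#   team,team_full_name,excel_sheet_name,anonymized_name,event_type
#   BOS,Boston Red Sox,BostonSheet,Team A,MLB game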
@prettyprint
def parse_raw_excel(raw_medical_file_name=None):
print('Parsing raw Excel medical data')
# If we have previously parsed the raw excel data, just
# skip this step
if check_cache(extracted_file_name):
return
# Otherwise, extract the part of the Excel file we will use
if raw_medical_file_name is None:
raise ValueError('Must specify input Excel file with --raw')
if not os.path.exists(raw_medical_file_name):
raise ValueError('Cannot find file %s' % raw_medical_file_name)
teams_df = grab_teams()
xls = pd.ExcelFile(raw_medical_file_name)
combined_df = pd.DataFrame()
for index, row in teams_df.iterrows():
with warnings.catch_warnings():
# Suppress irrelevant "Data Validation extension" warning
warnings.simplefilter("ignore")
accidents_df = pd.read_excel(xls, row.excel_sheet_name)
if len(accidents_df) == 0:
            raise ValueError('Excel sheet %s is empty!' % row.excel_sheet_name)
assert len(accidents_df) > 0
core_columns = [
'Date', 'Age', 'Gender', 'Mechanism', 'Meds + Rxs', 'Primary Dx',
'Inj body part', 'Abnormal vitals?', 'Treatment', 'Disposition']
# Restrict to columns of interest
accidents_df = accidents_df[core_columns]
# Saner column names
accidents_df.rename(columns={
'Primary Dx': 'Diagnosis',
'Inj body part': 'Location of Injury',
'Abnormal vitals?': 'Vital Signs',
}, inplace=True)
# If all data is missing, we should certainly skip the row!
accidents_df.dropna(axis='index', how='all', inplace=True)
# If crucial columns are missing, we also need to drop row
accidents_df.dropna(axis='index', subset=['Date', 'Mechanism'],
inplace=True)
accidents_df.reset_index(drop=True, inplace=True)
# Standardize labeling of "Gender" column
# Strip whitespace (e.g., "F" instead of " F")
accidents_df['Gender'] = accidents_df['Gender'].str.strip()
# Capitalize (e.g., "F" instead of "f")
accidents_df['Gender'] = accidents_df['Gender'].str.upper()
# Construct alternate representations of "Date"
accidents_df[['epoch_second', 'year', 'month', 'day']] = 0
ns = 1e-9
for i in range(len(accidents_df)):
d = accidents_df['Date'][i]
for c in ['year', 'month', 'day']:
accidents_df[c].values[i] = getattr(d, c)
epoch = int(accidents_df['Date'].values[i].item()*ns)
accidents_df['epoch_second'].values[i] = epoch
# Broadcast the teams.csv info to the remaining rows
for c in teams_df.columns:
accidents_df[c] = row[c]
combined_df = combined_df.append(accidents_df, ignore_index=True)
combined_df.to_csv(extracted_file_name, index=False)
@prettyprint
def grab_basic_game_stats():
print('Downloading basic baseball stats (like which days had games')
print('during the years of interest.)')
if check_cache(mlb_stats_file_name):
return
# Figure out which teams and time ranges we need
extracted_df = pd.read_csv(extracted_file_name)
combined_df = pd.DataFrame()
for team, team_df in extracted_df.groupby('team'):
first_year = int(team_df.year.min())
last_year = int(team_df.year.max())
df = grab_one_team_stats(
team=team, first_year=first_year, last_year=last_year)
combined_df = combined_df.append(df, ignore_index=True)
combined_df.to_csv(mlb_stats_file_name, index=False)
def grab_one_team_stats(team=None, first_year=None, last_year=None):
lprint('==== %s (%d-%d) ====' % (team, first_year, last_year))
for year in range(first_year, last_year+1):
out_filename = os.path.join(data_dir, '%s_%d.shtml' % (team, year))
if not os.path.exists(out_filename):
cmd = 'curl -o %s https://www.baseball-reference.com/teams/%s/%d-schedule-scores.shtml' % ( # noqa
out_filename, team, year)
subprocess.call(cmd, shell=True)
output_rows = []
MLB_columns = []
for year in range(first_year, last_year+1):
html = open('data/%s_%d.shtml' % (team, year)).read()
soup = BeautifulSoup(html, 'html.parser')
table = soup.find("table")
# Figure out data fields on first pass through.
# Note that the website may change these fields,
# so we need to be somewhat paranoid about handling them.
if MLB_columns == []:
for table_header in table.findAll('thead'):
for t in table_header.findAll('th'):
MLB_columns.append(t.string)
# As of March 2021, this reads:
# Gm#, Date, None, Tm, \xa0, Opp, W/L, R, RA, Inn, W-L,
# Rank, GB, Win, Loss, Save, Time, D/N, Attendance, cLI,
# Streak, Orig. Scheduled
# (Note that "None" is the Python None, not the string
# "None".)
# Need to overwrite some weirdnesses. Hope that the
# ordering of these fields doesn't change.
MLB_columns[0] = 'Year' # Weird, but correct
MLB_columns[2] = 'Boxscore'
MLB_columns[4] = 'Home_game'
# Relabel with saner names when possible
relabels = {
'Tm': 'Team', 'Opp': 'Opposing_team',
'W/L': 'Win_loss_tie',
'R': 'Runs_scored_allowed', 'RA': 'Runs_allowed',
'Inn': 'Innings', 'W-L': 'Win_loss_record_after_game',
'GB': 'Games_back', 'DN': 'Daytime', 'D/N': 'Daytime',
'CLI': 'Championship_Leverage_Index',
'Orig. Scheduled': 'Orig_Scheduled'}
MLB_columns = [relabels.get(c, c) for c in MLB_columns]
# Extract data
for table_row in table.findAll('tr'):
columns = table_row.findAll('td')
output_row = [year]
for column in columns:
output_row.append(column.text)
if len(output_row) == 1:
continue
output_rows.append(output_row)
df = pd.DataFrame(output_rows, columns=MLB_columns)
# Represent data in cleaner ways
df.Home_game = (df.Home_game.values != '@')
df.Innings.values[df.Innings.values == ''] = '9'
df.Innings = df.Innings.astype(int)
# Attendance may have a few missing values. Check that it's
# not out of hand, and drop them.
df['Attendance'].values[df.Attendance.values == ''] = np.nan
NaN_attendance = np.sum(df.Attendance.isna())
lprint(' Number of games missing attendance data: %d' % NaN_attendance)
if NaN_attendance > 10:
raise ValueError(
'Suspiciously many null attendance entries! (%d of %d)' %
(NaN_attendance, len(df)))
df.dropna(axis='index', subset=['Attendance'], inplace=True)
df['Attendance'] = df.Attendance.str.replace(',', '')
df['Attendance'] = df['Attendance'].astype(int)
df.drop(columns=['Boxscore'], axis='columns', inplace=True)
df['Daytime'] = (df['Daytime'].values == 'D')
df['walkoff'] = False
df['epoch_second'] = 0
month_to_int = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5,
'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10,
'Nov': 11, 'Dec': 12}
df['double_header'] = False
df['double_header_game_count'] = 0
df['game_length_minutes'] = 0
df['day_of_week'] = ''
for i in range(len(df)):
# Observed values:
# ['W', 'L-wo', 'L', 'W-wo', 'L &V']
if df['Win_loss_tie'].values[i].endswith('-wo'):
df['walkoff'].values[i] = True
df['Win_loss_tie'].values[i] = df[
'Win_loss_tie'].values[i][:-3]
if df['Win_loss_tie'].values[i].endswith(' &V'):
df['Win_loss_tie'].values[i] = df[
'Win_loss_tie'].values[i][:-3]
hours, minutes = df['Time'].values[i].split(':')
df['game_length_minutes'].values[i] = 60 * \
int(hours) + int(minutes)
year = df['Year'].values[i]
splitted = df['Date'].values[i].split()
if len(splitted) == 3:
(_, month, day_of_month) = splitted
elif len(splitted) == 4:
(_, month, day_of_month, game_count) = splitted
assert game_count[0] == '(' and game_count[2] == ')'
game_count = int(game_count[1]) - 1
df['double_header'].values[i] = True
df['double_header_game_count'].values[i] = game_count
else:
assert False
month = month_to_int[month]
day_of_month = int(day_of_month)
dt = datetime.datetime(year=year, month=month, day=day_of_month)
epoch_second = int(calendar.timegm(dt.timetuple()))
df['epoch_second'].values[i] = epoch_second
df['day_of_week'].values[i] = (
datetime.datetime.fromtimestamp(epoch_second).strftime("%A"))
# Restrict to only home games
df = df[df['Home_game'].values]
# Figure out what fraction of the season corresponds to each game.
# There are 183 days in a standard season (prior to 2018):
starting_day = {}
for year in range(first_year, 1+last_year):
starting_day[year] = (
df[df['Year'].values == year]['epoch_second'].min())
df['fraction_of_season'] = 0.0
season_length_seconds = 183 * 86400
for i in range(len(df)):
sd = starting_day[df['Year'].values[i]]
frac = 1.0 * (df['epoch_second'].values[i] -
sd) / season_length_seconds
frac = min(frac, 1.0)
assert frac >= 0.0
df['fraction_of_season'].values[i] = frac
df.reset_index(drop=True, inplace=True)
lprint(' Total home games: %d' % len(df))
lprint(' Total home game attendance: %d' % (df.Attendance.sum()))
return(df)
@prettyprint
def merge_mlb_and_medical_data():
print()
if check_cache([merged_file_name, pure_med_file_name]):
return
df_key = pd.read_csv(teams_file_name)
df_key.set_index('team', inplace=True)
# First, aggregate medical data by day. That essentially means
# "by game", except in the case of double-headers (although
# we'll be discarding those below anyway.)
med_df = pd.read_csv(extracted_file_name)
med_df['foul_ball_injuries'] = 0
med_df['non_foul_ball_injuries'] = 0
for index, row in med_df.iterrows():
# Looking for string "foul ball", but let's be a little paranoid
if 'foul' in row['Mechanism']:
med_df['foul_ball_injuries'].values[index] = 1
else:
med_df['non_foul_ball_injuries'].values[index] = 1
raw_df = med_df.copy()
keep_columns = ['Date', 'epoch_second', 'team',
'foul_ball_injuries', 'non_foul_ball_injuries',
'anonymized_name']
med_df = med_df[keep_columns].groupby(
['Date', 'epoch_second', 'team']).sum().reset_index()
med_df.rename(columns={'team': 'Team'}, inplace=True)
# Next, clean up MLB data
mlb_df = pd.read_csv(mlb_stats_file_name)
# Drop unlikely-to-be-used columns
keep_columns = [
'Year', 'Team', 'Win_loss_tie', 'Innings', 'Daytime', 'Attendance',
'epoch_second', 'game_length_minutes', 'fraction_of_season',
'double_header']
mlb_df = mlb_df[keep_columns]
# Join on game date.
joint = ['Team', 'epoch_second']
combined_df = mlb_df.merge(med_df, on=joint)
# Drop double-headers but record how much we're dropping
print('Filtering out double headers:')
print(' Team #games = #singles + #doubles (#days w/double headers)')
for team, combo_team_df in combined_df.groupby(['Team']):
num_all_games = len(combo_team_df)
num_single_headers = np.sum(
~(combo_team_df.double_header.values))
num_double_headers = np.sum(
combo_team_df.double_header.values)
num_double_header_dates = (
len(combo_team_df[combo_team_df.double_header.values
].epoch_second.unique()))
print(' %s %3d %3d %3d (%d)' % (
team, num_all_games, num_single_headers, num_double_headers,
num_double_header_dates))
single_header_index = ~(combined_df.double_header.values)
print('Reducing games from %d to %d' % (
len(combined_df), np.sum(single_header_index)))
combined_df = combined_df[single_header_index].reset_index(drop=True)
combined_df.drop(columns='double_header', inplace=True)
# There should only be one match for each date; let's double-check.
assert len(combined_df) == len(combined_df.groupby(joint).groups)
combined_df.to_csv(merged_file_name, index=False)
lprint('Total accident counts (only MLB games, no double-headers)')
for team, team_df in combined_df.groupby(['Team']):
lprint('%s: FB=%d non-FB=%d' % (
team,
team_df.foul_ball_injuries.sum(),
team_df.non_foul_ball_injuries.sum()))
###################################
# Next, take the raw extracted medical records and restrict to the dates of
# MLB ball games
good_epoch_set = set(combined_df.epoch_second.values)
pure_med_df = raw_df[raw_df.epoch_second.isin(good_epoch_set)
].reset_index(drop=True)
lprint('Ages of victims. [min < 25th < median < 75th < max]')
for team, team_df in pure_med_df.groupby(['team']):
age_dist = team_df.Age.values
age_dist = age_dist[np.isfinite(age_dist)]
lprint('%s: %d < %d < %d < %d < %3d (N=%5d) Foul balls' % (
team,
np.quantile(age_dist, 0),
np.quantile(age_dist, .25),
np.quantile(age_dist, .50),
np.quantile(age_dist, .75),
np.quantile(age_dist, 1),
len(age_dist)))
pure_med_df.to_csv(pure_med_file_name, index=False)
@prettyprint
def estimate_missing_games():
if check_cache([missing_game_file_name, neg_binom_params_file_name,
missing_summary_file_name]):
return
    df_key = pd.read_csv(teams_file_name)
#!/bin/env python
# -*- coding: utf-8 -*-
"""
A Python package that aids the user in making dynamic cuts to data in various
parameter spaces, using a simple GUI.
.. versioncreated:: 0.1
.. versionchanged:: 0.6
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib.widgets import Slider, Button
import glob as glob
import pandas as pd
from .external_functions import *
class open:
def __init__(self, _core_df, _namex, _namey):
'''
        A class that initialises the barbershop session to which all other
        content is appended.
Parameters:
_core_df (pandas.core.frame.DataFrame): A dataframe containing all
data the user wishes to call in this module.
_namex (str): The name of the X values in _core_df
_namey (str): The name of the Y values in _core_df
'''
#Check contents is a dataframe
if not isinstance(_core_df, pd.core.frame.DataFrame):
print('Please enter in a pandas DataFrame containing the data.')
self.close_shop()
return None
self.core_df = _core_df
self.namex = _namex
self.namey = _namey
self.X = self.core_df[self.namex]
self.Y = self.core_df[self.namey]
#Premeptively turn both histograms off
self.hist_x_on = False
self.hist_y_on = False
#Initializing other metadata
self.clients = 0
self.seating = pd.DataFrame({self.namex: self.X, self.namey : self.Y})
self.lowers = pd.DataFrame()
self.uppers = pd.DataFrame()
self.floc = 'dataframe_cut.csv'
self.cloc = 'cuts.csv'
#Check X and Y are of equal length
if len(self.X) != len(self.Y):
print('X and Y are not of equal length.')
self.close_shop()
return None
if any(type(word) != str for word in [self.namex, self.namey]):
print('Please enter "name" as a string.')
self.close_shop()
return None
def histograms_on(self,x=False,y=False):
'''Turn on optional histograms for x and y parameter spaces.
Parameters:
x (bool): Default False. Set True to display histogram in x.
y (bool): Default False. Set True to display histogram in y.
'''
self.hist_x_on = x
self.hist_y_on = y
def add_client(self, name, lower=-np.inf, upper=np.inf):
'''
A function that allows the user to add a parameter to make cuts in, up to
a maximum of five. The user has the option of setting lower and upper
limits on the cuts in this parameter space. If values are given for either
'lower', 'upper', or 'both', these are used to make initial cuts to the
data.
Parameters:
client (ndarray): an array as the same length as self.X and self.Y,
arranged identically, with parameter values for each data point.
name (str): a string containing the name of the dataframe column to
make cuts in, which will be referred to in other functions and
printed on the GUI.
lower (float): Default -Inf. The lowest possible value of the cut
in this parameter space. If no value is given, it takes the
minimum value in the 'client' ndarray.
upper (float): Default Inf. The highest possible value of the cut
in this parameter space. If no value is given, it takes the
highest value in the 'client' ndarray.
'''
#Call the data from the core dataframe
try:
client = self.core_df[name]
except KeyError:
print('The handle "'+str(name)+'" is not affiliated with a dataframe column.')
print('Please enter a correct handle, or re-open the barbershop with a different dataframe.')
print('Client leaving the barbershop.')
print('Number of seats in use : '+str(self.clients)+'/5.')
return None
#Check that the list of clients isn't already full
if self.clients == 5:
print('The barbershop is full, please proceed to plot the GUI, or remove clients using the evict_client(name) command.')
return None
#Check length of the client is in agreement with X and Y
if len(client) != len(self.X):
print('Client is not of equal length with X and Y.')
print('Client leaving the barbershop.')
print('Number of seats in use : '+str(self.clients)+'/5.')
return None
#Check that name is a string
if type(name) != str:
print('Please enter "name" as a string.')
print('Client leaving the barbershop.')
print('Number of seats in use : '+str(self.clients)+'/5.')
return None
#Adding the data to the existing class dataframe 'self.seating'
self.seating[name] = client
#Save the lower and upper values
if not np.isfinite(lower):
self.lowers[name] = [np.nanmin(client)]
else:
self.lowers[name] = [lower]
if not np.isfinite(upper):
self.uppers[name] = [np.nanmax(client)]
else:
self.uppers[name] = [upper]
self.clients += 1
print('Number of seats in use : '+str(self.clients)+'/5.')
if self.clients == 5:
print('The barbershop is now full (5 parameter spaces)')
print('Please evict a client if you wish to add another.')
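    # Illustrative usage of the methods defined so far (column names are hypothetical):
    #   shop = open(df, 'teff', 'logg')
    #   shop.histograms_on(x=True, y=True)
    #   shop.add_client('numax', lower=10., upper=280.)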
def evict_client(self, name):
'''
Simple function that allows the user to remove a set of data in a given
parameter space by passing the name it was given when added to the
'add_client' function.
Parameters:
name (str): a string containing the name of the ndarray client,
which will be referred to in other functions and printed on the
GUI.
'''
#Check that name is a string
if type(name) != str:
print('Please enter "name" as a string.')
return None
#Check this name is actually included in the list of clients
if not any(word == name for word in list(self.seating)):
print('There is no set of parameters in the list of clients with this name.')
return None
#Remove client of title 'name' from the list of parameters
del self.seating[name]
del self.lowers[name]
del self.uppers[name]
self.clients -= 1
print('Client '+str(name)+' has been evicted.')
print('Number of seats in use : '+str(self.clients)+'/5.')
def close_shop(self):
'''
Simple function that allows the user to reset the barbershop class
completely by deleting all existing metadata.
'''
del self.X
del self.Y
del self.hist_x_on
del self.hist_y_on
del self.clients
del self.seating
del self.lowers
del self.uppers
del self.namex
del self.namey
print('All array and cuts metadata have been deleted from memory.')
print('Please re-initialize the module.')
print('\n\n')
def get_regular(self, sfile):
'''
A function that allows the user to apply identical cuts to the same parameter
spaces for a new dataframe saved from a previous set of cuts.
NOTE: This class overwrites all loaded clients.
Parameters:
sfile (str): The location of the .csv file containing the cuts to be
applied to the self.core_df dataframe.
'''
try:
            reg = pd.read_csv(sfile, sep=' ', index_col=0)
except IOError:
print('This file does not exist. Please fill in a correct file path.')
return None
try:
l = reg.loc['lower']
u = reg.loc['upper']
self.lowers = pd.DataFrame()
            self.uppers = pd.DataFrame()
#############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors:
#
# Contact: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
from logging import exception
from sys import exec_prefix
import unittest
from pyfakefs import fake_filesystem_unittest
from collections import namedtuple
import os
import pandas as pd
import numpy as np
from memilio.epidata import getPopulationData as gpd
from memilio.epidata import getDataIntoPandasDataFrame as gd
from unittest.mock import patch
class Test_getPopulationData(fake_filesystem_unittest.TestCase):
path = '/home/x'
Data = namedtuple("Data", "filename item columns_wanted filename_out")
d1 = Data("FullDataB", '5dc2fc92850241c3be3d704aa0945d9c_2', ["LAN_ew_RS", 'LAN_ew_GEN', 'LAN_ew_EWZ'],
"PopulStates")
d2 = Data("FullDataL", 'b2e6d8854d9744ca88144d30bef06a76_1',
['RS', 'GEN', 'EWZ'], "PopulCounties")
test_string1 = ("""[\
{"FID":1,"LAN_ew_RS":1,"LAN_ew_AGS":1,"LAN_ew_SDV_RS":10020000000,"LAN_ew_GEN":"Schleswig-Holstein",\
"LAN_ew_BEZ":"Land","LAN_ew_IBZ":20,"LAN_ew_BEM":"--","LAN_ew_SN_L":1,"LAN_ew_SN_R":0,"LAN_ew_SN_K":0,"LAN_ew_SN_V1":0,\
"LAN_ew_SN_V2":0,"LAN_ew_SN_G":0,"LAN_ew_FK_S3":0,"LAN_ew_NUTS":"DEF","LAN_ew_WSK":"2012\/02\/01 00:00:00",\
"LAN_ew_EWZ":2889821,"LAN_ew_KFL":15804.35,"SHAPE_Length":20.9191264621,"SHAPE_Area":2.1595456768},\
{"FID":2,"LAN_ew_RS":2,"LAN_ew_AGS":2,"LAN_ew_SDV_RS":20000000000,"LAN_ew_GEN":"Hamburg",\
"LAN_ew_BEZ":"Freie und Hansestadt","LAN_ew_IBZ":22,"LAN_ew_BEM":"--","LAN_ew_SN_L":2,"LAN_ew_SN_R":0,"LAN_ew_SN_K":0,\
"LAN_ew_SN_V1":0,"LAN_ew_SN_V2":0,"LAN_ew_SN_G":0,"LAN_ew_FK_S3":0,"LAN_ew_NUTS":"DE6",\
"LAN_ew_WSK":"1974\/01\/01 00:00:00","LAN_ew_EWZ":1830584,"LAN_ew_KFL":755.09,"SHAPE_Length":3.1095198283,\
"SHAPE_Area":0.1001785991},\
{"FID":3,"LAN_ew_RS":3,"LAN_ew_AGS":3,"LAN_ew_SDV_RS":32410001001,"LAN_ew_GEN":"Niedersachsen","LAN_ew_BEZ":"Land",\
"LAN_ew_IBZ":20,"LAN_ew_BEM":"--","LAN_ew_SN_L":3,"LAN_ew_SN_R":0,"LAN_ew_SN_K":0,"LAN_ew_SN_V1":0,"LAN_ew_SN_V2":0,\
"LAN_ew_SN_G":0,"LAN_ew_FK_S3":0,"LAN_ew_NUTS":"DE9","LAN_ew_WSK":"2015\/01\/01 00:00:00","LAN_ew_EWZ":7962775,\
"LAN_ew_KFL":47709.8,"SHAPE_Length":29.8156067698,"SHAPE_Area":6.3454724588}]""")
test_string1r = ("""[\
{"ID_State":1,"State":"Schleswig-Holstein","Population":2889821},\
{"ID_State":2,"State":"Hamburg","Population":1830584},\
{"ID_State":3,"State":"Niedersachsen","Population":7962775}]""")
test_string2 = """[\
{"FID":1,"RS":1001,"AGS":1001,"SDV_RS":10010000000,"GEN":"Flensburg","BEZ":"Kreisfreie Stadt","IBZ":40,"BEM":"--",\
"SN_L":1,"SN_R":0,"SN_K":1,"SN_V1":0,"SN_V2":0,"SN_G":0,"FK_S3":"R","NUTS":"DEF01","WSK":"2008\/01\/01 00:00:00",\
"EWZ":88519,"KFL":56.73,"Kennziffer":1001,"EWZ_18":null,"SHAPE_Length":0.5247234366,"SHAPE_Area":0.0068727541},\
{"FID":2,"RS":1002,"AGS":1002,"SDV_RS":10020000000,"GEN":"Kiel","BEZ":"Kreisfreie Stadt","IBZ":40,"BEM":"--",\
"SN_L":1,"SN_R":0,"SN_K":2,"SN_V1":0,"SN_V2":0,"SN_G":0,"FK_S3":"R","NUTS":"DEF02","WSK":"2006\/01\/01 00:00:00",\
"EWZ":247943,"KFL":118.65,"Kennziffer":1002,"EWZ_18":null,"SHAPE_Length":1.2755450552,"SHAPE_Area":0.0155057123}]"""
# {"FID":3,"RS":1003,"AGS":1003,"SDV_RS":10030000000,"GEN":"L\u00fcbeck","BEZ":"Kreisfreie Stadt","IBZ":40,"BEM":"--",
# "SN_L":1,"SN_R":0,"SN_K":3,"SN_V1":0,"SN_V2":0,"SN_G":0,"FK_S3":"R","NUTS":"DEF03","WSK":"2006\/02\/01 00:00:00",
# "EWZ":216318,"KFL":214.19,"Kennziffer":1003,"EWZ_18":null,"SHAPE_Length":1.8350372077,"SHAPE_Area":0.0289309207}
test_string2r = """[{"ID_County":1001,"County":"Flensburg","Population":88519},\
{"ID_County":1002,"County":"Kiel","Population":247943}]"""
# {"ID_County":1003,"County":"L\u00fcbeck","Population":216318}
test_old_counties = np.zeros((18, 2))
test_old_counties[:, 0] = [3152, 3156, 13056, 13002, 13055, 13052, 13051, 13053, 13061, 13005, 13057, 13006, 13058,
13059, 13062, 13001, 13054, 13060]
test_old_counties[:, 1] = np.arange(len(test_old_counties))
test_new_counties = np.zeros((7, 2))
test_new_counties[:, 0] = [3159, 13071, 13072, 13073, 13074, 13075, 13076]
test_new_counties[:, 1] = [1, 14, 13, 27, 23, 42, 33]
data = np.zeros((5, 30))
data[:, 0] = np.arange(1, 6)
for i in range(len(data)):
data[i, 3] = 22*(i+1)
data[i, 4] = 11*(i+1)
data[i, 5:-2] = 1*(i+1)
data[i, 16] = 11*(i+1)
test_zensus = pd.DataFrame(data, columns=["FID", "DES", "Name", "EWZ", "Gesamt_Maennlich", 'M_Unter_3', 'M_3_bis_5',
'M_6_bis_14', 'M_15_bis_17', 'M_18_bis_24', 'M_25_bis_29', 'M_30_bis_39',
'M_40_bis_49', 'M_50_bis_64', 'M_65_bis_74', 'M_75_und_aelter', "Gesamt_Weiblich",
'W_Unter_3', 'W_3_bis_5', 'W_6_bis_14', 'W_15_bis_17', 'W_18_bis_24',
'W_25_bis_29', 'W_30_bis_39', 'W_40_bis_49', 'W_50_bis_64',
'W_65_bis_74', 'W_75_und_aelter', 'SHAPE_Length', 'SHAPE_Area'])
test_zensus["DES"] = "Kreis"
test_zensus["Name"] = ["Hogwarts", "Narnia",
"MittelErde", "Westeros", "Wakanda"]
data = np.zeros((5, 3))
data[:, 0] = [1001, 1002, 1003, 1004, 1005]
data[:, 2] = [(x+1)*22/1000 for x in range(len(data))]
test_reg_key = pd.DataFrame(data, columns=['AGS', 'NAME', 'Zensus_EWZ'])
test_reg_key['NAME'] = ["Hogwarts", "Narnia",
"MittelErde", "Westeros", "Wakanda"]
data = np.zeros((5, 2))
data[:, 0] = [1001, 1002, 1003, 1004, 1005]
data[:, 1] = [(x+1)*44 for x in range(len(data))]
test_counties = pd.DataFrame(
data, columns=['Schlüssel-nummer', 'Bevölkerung2)'])
columns = ['ID_County', 'Total', '<3 years', '3-5 years', '6-14 years', '15-17 years', '18-24 years',
'25-29 years', '30-39 years', '40-49 years', '50-64 years',
'65-74 years', '>74 years']
data = np.zeros((5, len(columns)))
for i in range(len(data)):
data[i, 0] = 1001 + i
data[i, 1] = 22*(i+1)
data[i, 2:] = 2*(i+1)
test_population_result = pd.DataFrame(data, columns=columns)
test_population_result = test_population_result.astype('int64')
data = np.zeros((5, len(columns)))
for i in range(len(data)):
data[i, 0] = 1001 + i
data[i, 1] = 44 * (i+1)
data[i, 2:] = 4 * (i+1)
test_current_population_result = pd.DataFrame(data, columns=columns)
test_current_population_result = test_current_population_result.astype(
'int64')
def setUp(self):
self.setUpPyfakefs()
@patch('memilio.epidata.getPopulationData.gd.loadCsv')
def test_gpd_download_data1(self, mock_loadCSV):
        mock_loadCSV.return_value = pd.read_json(self.test_string1)
import os
import sys
import numpy as np
import pandas as pd
import time
import scipy.sparse
import scipy.sparse.linalg
from scipy import stats
from scipy.optimize import minimize
np.set_printoptions(threshold=sys.maxsize)
# Add lib to the python path.
from genTestDat import genTestData2D, prodMats2D
from est2d import *
from est3d import *
from npMatrix2d import *
from npMatrix3d import *
# ==================================================================================
#
# The below code runs multiple simulations in serial. It takes the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
#           - Design 2: nlevels=[50,25], nraneffs=[3,2]
#           - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations (default=1000)
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def sim2D(desInd, OutDir, nsim=1000, mode='param', REML=False):
# Loop through and run simulations
for simInd in range(1,nsim+1):
runSim(simInd, desInd, OutDir, mode, REML)
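# A minimal call for a serial batch of simulations might look like this; the output
# directory is a placeholder and must already exist so the CSV files can be written there:
#   sim2D(desInd=1, OutDir='./sim_results', nsim=100, mode='param', REML=False)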
# ==================================================================================
#
# The below simulates random test data and runs all methods described in the LMM
# paper on the simulated data. It requires the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - SimInd: An index to represent the simulation. All output for this simulation will
# be saved in files with the index specified by this argument. The
# simulation with index 1 will also perform any necessary additional setup
# and should therefore be run before any others.
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
#           - Design 2: nlevels=[50,25], nraneffs=[3,2]
#           - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def runSim(simInd, desInd, OutDir, mode='param', REML=False):
# Make sure simInd is an int
simInd = int(simInd)
#===============================================================================
# Setup
#===============================================================================
# Decide whether we wish to run T statistics/degrees of freedom estimation
if mode=='param':
runDF = False
else:
runDF = True
# Different designs
if desInd==1:
nlevels = np.array([50])
nraneffs = np.array([2])
if desInd==2:
nlevels = np.array([50,25])
nraneffs = np.array([3,2])
if desInd==3:
nlevels = np.array([100,30,10])
nraneffs = np.array([4,3,2])
# Number of observations
n = 1000
# If we are doing a degrees of freedom simulation, create the factor vectors, X and Z if
# this is the first run. These will then be used across all following simulations. If we
# are doing a simulation to look at parameter estimation, we recreate the design on every
# run as our focus is to stress test the performance of the algorithms, rather than compare
# performance of one specific model in particular.
if simInd == 1 or not runDF:
# Delete any factor vectors from a previous batch of simulations.
if runDF:
for i in range(len(nlevels)):
if os.path.isfile(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv')):
os.remove(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'))
fvs = None
X = None
Z = None
# Otherwise read the factor vectors, X and Z in from file.
else:
# Initialize empty factor vectors dict
fvs = dict()
# Loop through factors and save factor vectors
for i in range(len(nlevels)):
fvs[i] = pd.io.parsers.read_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), header=None).values
X = pd.io.parsers.read_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), header=None).values
Z = pd.io.parsers.read_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), header=None).values
# Generate test data
Y,X,Z,nlevels,nraneffs,beta,sigma2,b,D, fvs = genTestData2D(n=n, p=5, nlevels=nlevels, nraneffs=nraneffs, save=True, simInd=simInd, desInd=desInd, OutDir=OutDir, factorVectors=fvs, X=X, Z=Z)
# Save the new factor vectors if this is the first run.
if simInd == 1 and runDF:
# Loop through the factors saving them
for i in range(len(nlevels)):
pd.DataFrame(fvs[i]).to_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), index=False, header=None)
pd.DataFrame(X).to_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), index=False, header=None)
pd.DataFrame(Z).to_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), index=False, header=None)
# Work out number of observations, parameters, random effects, etc
n = X.shape[0]
p = X.shape[1]
q = np.sum(nraneffs*nlevels)
qu = np.sum(nraneffs*(nraneffs+1)//2)
r = nlevels.shape[0]
# Tolerance
tol = 1e-6
# Work out factor indices.
facInds = np.cumsum(nraneffs*nlevels)
facInds = np.insert(facInds,0,0)
# Convert D to dict
Ddict=dict()
for k in np.arange(len(nlevels)):
Ddict[k] = D[facInds[k]:(facInds[k]+nraneffs[k]),facInds[k]:(facInds[k]+nraneffs[k])]
# Get the product matrices
XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ = prodMats2D(Y,Z,X)
# -----------------------------------------------------------------------------
# Create empty data frame for results:
# -----------------------------------------------------------------------------
# Row indices
indexVec = np.array(['Time', 'nit', 'llh'])
for i in np.arange(p):
indexVec = np.append(indexVec, 'beta'+str(i+1))
# Sigma2
indexVec = np.append(indexVec, 'sigma2')
# Dk
for k in np.arange(r):
for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
indexVec = np.append(indexVec, 'D'+str(k+1)+','+str(j+1))
# Sigma2*Dk
for k in np.arange(r):
for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
indexVec = np.append(indexVec, 'sigma2*D'+str(k+1)+','+str(j+1))
# If we're doing a T statistic simulation add the T statistics, p values and
# degrees of freedom rows to the dataframe.
if runDF:
# T value p value and Satterthwaite degrees of freedom estimate.
indexVec = np.append(indexVec,'T')
indexVec = np.append(indexVec,'p')
indexVec = np.append(indexVec,'swdf')
# Construct dataframe
results = pd.DataFrame(index=indexVec, columns=['Truth', 'FS', 'fFS', 'SFS', 'fSFS', 'cSFS'])
# ------------------------------------------------------------------------------------
# Truth
# ------------------------------------------------------------------------------------
# Default time and number of iterations
results.at['Time','Truth']=0
results.at['nit','Truth']=0
# Construct parameter vector
paramVec_true = beta[:]
paramVec_true = np.concatenate((paramVec_true,np.array(sigma2).reshape(1,1)),axis=0)
# Add D to parameter vector
facInds = np.cumsum(nraneffs*nlevels)
facInds = np.insert(facInds,0,0)
# Convert D to vector
for k in np.arange(len(nlevels)):
vechD = mat2vech2D(D[facInds[k]:(facInds[k]+nraneffs[k]),facInds[k]:(facInds[k]+nraneffs[k])])/sigma2
paramVec_true = np.concatenate((paramVec_true,vechD),axis=0)
# Add results to parameter vector
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'Truth']=paramVec_true[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'Truth']=paramVec_true[p,0]*paramVec_true[i-3,0]
# Matrices needed for
Zte = ZtY - ZtX @ beta
ete = ssr2D(YtX, YtY, XtX, beta)
DinvIplusZtZD = D @ np.linalg.inv(np.eye(q) + ZtZ @ D)
# True log likelihood
llh = llh2D(n, ZtZ, Zte, ete, sigma2, DinvIplusZtZD,D,REML,XtX,XtZ,ZtX)[0,0]
# Add back on constant term
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Add ground truth log likelihood
results.at['llh','Truth']=llh
# Get the ground truth degrees of freedom if running a T statistic simulation
if runDF:
# Contrast vector (1 in last place 0 elsewhere)
L = np.zeros(p)
L[-1] = 1
L = L.reshape(1,p)
v = groundTruth_TDF(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol)
results.at[indexVec[p+6+2*qu],'Truth']=v[0,0]
#===============================================================================
# fSFS
#===============================================================================
# Get the indices for the individual random factor covariance parameters.
DkInds = np.zeros(len(nlevels)+1)
    DkInds[0] = int(p+1)
for k in np.arange(len(nlevels)):
        DkInds[k+1] = int(DkInds[k] + nraneffs[k]*(nraneffs[k]+1)//2)
# Run Full Simplified Fisher Scoring
t1 = time.time()
paramVector_fSFS,_,nit,llh = fSFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record Time and number of iterations
results.at['Time','fSFS']=t2-t1
results.at['nit','fSFS']=nit
results.at['llh','fSFS']=llh
# Record parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'fSFS']=paramVector_fSFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'fSFS']=paramVector_fSFS[p,0]*paramVector_fSFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_fSFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'fSFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'fSFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'fSFS']=df[0,0]
#===============================================================================
# cSFS
#===============================================================================
# Run Cholesky Simplified Fisher Scoring
t1 = time.time()
paramVector_cSFS,_,nit,llh = cSFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','cSFS']=t2-t1
results.at['nit','cSFS']=nit
results.at['llh','cSFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'cSFS']=paramVector_cSFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'cSFS']=paramVector_cSFS[p,0]*paramVector_cSFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_cSFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'cSFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'cSFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'cSFS']=df[0,0]
#===============================================================================
# FS
#===============================================================================
# Run Fisher Scoring
t1 = time.time()
paramVector_FS,_,nit,llh = FS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','FS']=t2-t1
results.at['nit','FS']=nit
results.at['llh','FS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'FS']=paramVector_FS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'FS']=paramVector_FS[p,0]*paramVector_FS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_FS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'FS']=T[0,0]
results.at[indexVec[p+5+2*qu],'FS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'FS']=df[0,0]
#===============================================================================
# SFS
#===============================================================================
# Run Simplified Fisher Scoring
t1 = time.time()
paramVector_SFS,_,nit,llh = SFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','SFS']=t2-t1
results.at['nit','SFS']=nit
results.at['llh','SFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'SFS']=paramVector_SFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'SFS']=paramVector_SFS[p,0]*paramVector_SFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_SFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'SFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'SFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'SFS']=df[0,0]
#===============================================================================
# fFS
#===============================================================================
# Run Full Fisher Scoring
t1 = time.time()
paramVector_fFS,_,nit,llh = fFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','fFS']=t2-t1
results.at['nit','fFS']=nit
results.at['llh','fFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'fFS']=paramVector_fFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'fFS']=paramVector_fFS[p,0]*paramVector_fFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_fFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'fFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'fFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'fFS']=df[0,0]
# Save results
results.to_csv(os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv'))
# ==================================================================================
#
# The below function collates the performance metrics for the parameter estimation
# simulations, prints summaries of the results and saves the results as csv files.
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,10], nraneffs=[3,2]
# - Design 3: nlevels=[100,50,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations to be collated.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def performanceTables(desInd, OutDir, nsim=1000):
# Make row indices
row = ['sim'+str(i) for i in range(1,nsim+1)]
# Make column indices
col = ['FS','fFS','SFS','fSFS','cSFS','lmer']
#-----------------------------------------------------------------------------
# Work out timing stats
#-----------------------------------------------------------------------------
# Make timing table
timesTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
timesTable = timesTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the times
simTimes = results_table.loc['Time','FS':]
# Add them to the table
timesTable.loc['sim'+str(simInd),:]=simTimes
# Save computation times to csv file
timesTable.to_csv(os.path.join(OutDir,'timesTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of computation times')
print(timesTable.describe().to_string())
#-----------------------------------------------------------------------------
# Work out number of iteration stats
#-----------------------------------------------------------------------------
    # Make number of iterations table
nitTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
nitTable = nitTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
        # Get the number of iterations
simNIT = results_table.loc['nit','FS':]
# Add them to the table
nitTable.loc['sim'+str(simInd),:]=simNIT
# Save number of iterations to csv file
nitTable.to_csv(os.path.join(OutDir,'nitTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of number of iterations')
print(nitTable.describe().to_string())
#-----------------------------------------------------------------------------
# Work out log-likelihood stats
#-----------------------------------------------------------------------------
    # Make log-likelihood table
llhTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
    llhTable = llhTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the log-likelihoods
simllh = results_table.loc['llh','FS':]
# Add them to the table
llhTable.loc['sim'+str(simInd),:]=simllh
# Save log likelihoods to csv file
llhTable.to_csv(os.path.join(OutDir,'llhTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of maximized log-likelihoods')
print(llhTable.describe().to_string())
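# A minimal usage sketch (assuming the per-simulation result files written above already
# exist in OutDir; the path below is a placeholder):
#
#   performanceTables(desInd=1, OutDir='./sim_results', nsim=1000)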
# ==================================================================================
#
# The below function collates the MAE and MRD metrics for the parameter estimation
# simulations, prints summaries of the results and saves the results as csv files.
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,10], nraneffs=[3,2]
# - Design 3: nlevels=[100,50,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations to be collated.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def differenceMetrics(desInd, OutDir, nsim=1000):
# Make row indices
row = ['sim'+str(i) for i in range(1,nsim+1)]
# Make column indices
col = ['FS','fFS','SFS','fSFS','cSFS','lmer']
#-----------------------------------------------------------------------------
# Work out absolute difference metrics for lmer
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the betas
simBetas = results_table.loc['beta1':'beta5',:]
if desInd==1:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D1,3',:]
if desInd==2:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D2,3',:]
if desInd==3:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D3,3',:]
# Work out the maximum absolute errors for betas
maxAbsErrBetas = (simBetas.sub(simBetas['lmer'], axis=0)).abs().max()
        # Work out the maximum absolute errors for sigma2D (same computation for all designs)
        maxAbsErrVar = (simVar.sub(simVar['lmer'], axis=0)).abs().max()
# Add them to the tables
diffTableBetas.loc['sim'+str(simInd),:]=maxAbsErrBetas
diffTableVar.loc['sim'+str(simInd),:]=maxAbsErrVar
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for beta estimates (compared to lmer)')
print(diffTableBetas.describe().to_string())
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for variance estimates (compared to lmer)')
print(diffTableVar.describe().to_string())
# Save MAE values for lmer to csv
diffTableVar.to_csv(os.path.join(OutDir,'diffTableVar_lmer_abs.csv'))
diffTableBetas.to_csv(os.path.join(OutDir,'diffTableBetas_lmer_abs.csv'))
#-----------------------------------------------------------------------------
# Work out absolute difference metrics for Truth
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
        results_table = pd.read_csv(results_file, index_col=0)
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
more anemometers readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
:return TimeSeries object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
""""
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
                data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
            :param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
            :param segments_per_day: Number of segments into which each 24 hour period is split. Must be a divisor of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
                data = bw.load_csv('C:\\Users\\Stephen\\Documents\\Analysis\\demo_data')
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
                timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, segments_per_day=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times = pd.Series([])
time_wspds = pd.Series([])
mean_time_wspds = pd.Series([])
c = pd.Series([])
slope = pd.Series([])
intercept = pd.Series([])
            alpha = pd.Series([])
import json
from django.http import HttpResponse
from .models import (
Invoice,
Seller,
Receiver,
)
from .serializers import (
InvoiceSerializer,
SellerSerializer,
ReceiverSerializer,
)
import re
from django.views import View
from django.http import Http404
import pandas as pd
import datetime as dt
def get_object_invoice(pk):
try:
return Invoice.objects.get(pk=pk)
except Invoice.DoesNotExist:
raise Http404
def get_object_seller(pk):
try:
return Seller.objects.get(pk=pk)
except Seller.DoesNotExist:
raise Http404
def get_object_receiver(pk):
try:
return Receiver.objects.get(pk=pk)
except Receiver.DoesNotExist:
raise Http404
class InvoiceShowDelete(View):
def get(self, request, pk):
invoice = get_object_invoice(pk)
serializer = InvoiceSerializer(invoice)
return HttpResponse(json.dumps(serializer.data), status=200)
def delete(self, request, pk):
invoice = get_object_invoice(pk)
invoice.delete()
return HttpResponse(status=204)
class InvoiceCreateList(View):
def get(self, request):
invoices = Invoice.objects.all()
serializer = InvoiceSerializer(invoices, many=True)
return HttpResponse(json.dumps(serializer.data))
def post(self, request):
dict_invoice = {}
dict_seller = {}
dict_receiver = {}
json_dict = None
if request.body:
json_dict = json.loads(request.body)
elif request.POST:
json_dict = request.POST
# access_key, uf_code_seller, cnpj_seller, number
access_key = json_dict['main_access_key'].replace(' ', '')
uf_code_seller = access_key[0:2]
cnpj_seller = access_key[6:20]
number = access_key[25:34]
dict_invoice['access_key'] = access_key
dict_invoice['number'] = number
dict_seller['uf_code'] = uf_code_seller
dict_seller['cnpj'] = cnpj_seller
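        # Illustrative layout of the 44-digit access key implied by the slices above
        # (the example key is made up):
        #   key = '35' + '2004' + '12345678000195' + '55' + '001' + '000012345' + '1' + '12345678' + '9'
        #   key[0:2]   -> '35'             (seller UF code)
        #   key[6:20]  -> '12345678000195' (seller CNPJ)
        #   key[25:34] -> '000012345'      (invoice number)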
# cpf_cnpj_receiver
cpf_cnpj_receiver = json_dict['sender_cnpj_cpf']
cpf_cnpj_receiver = re.search(
r'\d{11}|\d{14}|\d{3}\.\d{3}\.\d{3}\-\d{2}|\d{2}\.\d{3}\.\d{3}\/\d{4}\-\d{2}',
cpf_cnpj_receiver,
re.M | re.I
)
cpf_cnpj_receiver = str(cpf_cnpj_receiver.group())
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('-', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('.', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('/', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace(' ', '')
dict_receiver['cpf_cnpj'] = cpf_cnpj_receiver
# operation_nature
dict_invoice['operation_nature'] = json_dict['main_nature_operation']
# authorization_protocol
dict_invoice['authorization_protocol'] = json_dict['main_protocol_authorization_use']
# state_registration
dict_invoice['state_registration'] = json_dict['main_state_registration']
# emission_date
emission_date = json_dict['sender_emission_date']
emission_date = re.search(r'\d{2}\/\d{2}\/\d{4}', emission_date, re.M | re.I)
emission_date = str(emission_date.group())
emission_date = emission_date.split('/')
emission_date = emission_date[2] + '-' + emission_date[1] + '-' + emission_date[0]
dict_invoice['emission_date'] = emission_date
# entry_exit_datetime
entry_exit_datetime = json_dict['sender_out_input_date']
entry_exit_datetime = entry_exit_datetime.split('/')
time = json_dict['sender_output_time']
        entry_exit_datetime = entry_exit_datetime[2] + '-' + entry_exit_datetime[1] + '-' + entry_exit_datetime[0] + 'T' + time
dict_invoice['entry_exit_datetime'] = entry_exit_datetime
# total_products_value
total_products_value = json_dict['tax_total_cost_products']
total_products_value = total_products_value.replace('.', '')
total_products_value = total_products_value.replace(',', '.')
dict_invoice['total_products_value'] = float(total_products_value)
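        # The replace chain above converts Brazilian-formatted numbers to floats,
        # e.g. '1.234,56' -> '1234,56' -> '1234.56' -> 1234.56; the same pattern is
        # repeated for the remaining monetary fields below.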
# total_invoice_value
total_invoice_value = json_dict['tax_cost_total_note']
total_invoice_value = total_invoice_value.replace('.', '')
total_invoice_value = total_invoice_value.replace(',', '.')
dict_invoice['total_invoice_value'] = float(total_invoice_value)
# basis_calculation_icms
basis_calculation_icms = json_dict['tax_icms_basis']
basis_calculation_icms = basis_calculation_icms.replace('.', '')
basis_calculation_icms = basis_calculation_icms.replace(',', '.')
dict_invoice['basis_calculation_icms'] = float(basis_calculation_icms)
# freight_value
freight_value = json_dict['tax_cost_freight']
freight_value = freight_value.replace('.', '')
freight_value = freight_value.replace(',', '.')
dict_invoice['freight_value'] = float(freight_value)
# insurance_value
insurance_value = json_dict['tax_cost_insurance']
insurance_value = insurance_value.replace('.', '')
insurance_value = insurance_value.replace(',', '.')
dict_invoice['insurance_value'] = float(insurance_value)
# icms_value
icms_value = json_dict['tax_cost_icms']
icms_value = icms_value.replace('.', '')
icms_value = icms_value.replace(',', '.')
dict_invoice['icms_value'] = float(icms_value)
# discount_value
discount_value = json_dict['tax_discount']
discount_value = discount_value.replace('.', '')
discount_value = discount_value.replace(',', '.')
dict_invoice['discount_value'] = float(discount_value)
# basis_calculation_icms_st
basis_calculation_icms_st = json_dict['tax_icms_basis_st']
basis_calculation_icms_st = basis_calculation_icms_st.replace('.', '')
basis_calculation_icms_st = basis_calculation_icms_st.replace(',', '.')
dict_invoice['basis_calculation_icms_st'] = float(basis_calculation_icms_st)
# icms_value_st
icms_value_st = json_dict['tax_cost_icms_replacement']
icms_value_st = icms_value_st.replace('.', '')
icms_value_st = icms_value_st.replace(',', '.')
dict_invoice['icms_value_st'] = float(icms_value_st)
# other_expenditure
other_expenditure = json_dict['tax_other_expenditure']
other_expenditure = other_expenditure.replace('.', '')
other_expenditure = other_expenditure.replace(',', '.')
dict_invoice['other_expenditure'] = float(other_expenditure)
# ipi_value
ipi_value = json_dict['tax_cost_ipi']
ipi_value = ipi_value.replace('.', '')
ipi_value = ipi_value.replace(',', '.')
dict_invoice['ipi_value'] = float(ipi_value)
# receiver
dict_receiver['name'] = json_dict['sender_name_social']
dict_receiver['address'] = json_dict['sender_address']
dict_receiver['neighborhood'] = json_dict['sender_neighborhood_district']
dict_receiver['cep'] = json_dict['sender_cep'].replace('-', '')
dict_receiver['county'] = json_dict['sender_county']
dict_receiver['uf'] = json_dict['sender_uf']
dict_receiver['phone'] = json_dict['sender_phone_fax']
# ------------------------
if Receiver.objects.filter(cpf_cnpj=cpf_cnpj_receiver).count() == 1:
receiver = Receiver.objects.get(cpf_cnpj=cpf_cnpj_receiver)
dict_invoice['receiver'] = receiver.pk
else:
receiver_serializer = ReceiverSerializer(data=dict_receiver)
if receiver_serializer.is_valid():
receiver_serializer.save()
else:
return HttpResponse(
json.dumps([
receiver_serializer.errors,
]),
status=400
)
dict_invoice['receiver'] = receiver_serializer.data['id']
if Seller.objects.filter(cnpj=cnpj_seller).count() == 1:
seller = Seller.objects.get(cnpj=cnpj_seller)
dict_invoice['seller'] = seller.pk
else:
seller_serializer = SellerSerializer(data=dict_seller)
if seller_serializer.is_valid():
seller_serializer.save()
else:
return HttpResponse(
json.dumps([
seller_serializer.errors,
]),
status=400
)
dict_invoice['seller'] = seller_serializer.data['id']
invoice_serializer = InvoiceSerializer(data=dict_invoice)
if invoice_serializer.is_valid():
invoice_serializer.save()
else:
return HttpResponse(
json.dumps(
invoice_serializer.errors
),
status=400
)
return HttpResponse(
json.dumps([
invoice_serializer.data,
]),
status=200
)
def sellerShow(request, pk):
if request.method == 'GET':
seller = get_object_seller(pk)
serializer = SellerSerializer(seller)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def receiverShow(request, pk):
if request.method == 'GET':
receiver = get_object_receiver(pk)
serializer = ReceiverSerializer(receiver)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def sellerList(request):
if request.method == 'GET':
seller = Seller.objects.all()
serializer = SellerSerializer(seller, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
def receiverList(request):
if request.method == 'GET':
receiver = Receiver.objects.all()
serializer = ReceiverSerializer(receiver, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
def chart_total_value_per_time(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
df['total'] = pd.to_numeric(df['total'].apply(lambda x: round(x, 2)))
data = df.to_dict('list')
df = pd.DataFrame({'dateM': date, 'totalM': total})
df = df.sort_values(by='dateM')
df['dateM'] = pd.to_datetime(df['dateM']).apply(lambda x: x.strftime('%Y-%m'))
sf = df.groupby('dateM')['totalM'].sum()
df = pd.DataFrame({'dateM': sf.index, 'totalM': sf.values})
df['dateM'] = pd.to_datetime(df['dateM']).apply(lambda x: x.strftime('%m/%Y'))
df['totalM'] = pd.to_numeric(df['totalM'].apply(lambda x: round(x, 2)))
data['dateM'] = df.to_dict('list')['dateM']
data['totalM'] = df.to_dict('list')['totalM']
df = pd.DataFrame({'dateY': date, 'totalY': total})
df = df.sort_values(by='dateY')
df['dateY'] = pd.to_datetime(df['dateY']).apply(lambda x: x.strftime('%Y'))
sf = df.groupby('dateY')['totalY'].sum()
df = pd.DataFrame({'dateY': sf.index, 'totalY': sf.values})
df['totalY'] = pd.to_numeric(df['totalY'].apply(lambda x: round(x, 2)))
data['dateY'] = df.to_dict('list')['dateY']
data['totalY'] = df.to_dict('list')['totalY']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
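# Illustrative shape of the JSON payload returned above (values made up):
#   {"date": ["06/04/2020", ...], "total": [1234.56, ...],
#    "dateM": ["04/2020", ...],   "totalM": [...],
#    "dateY": ["2020", ...],      "totalY": [...]}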
def chart_qtd_per_time(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
for invoice in invoices:
date.append(invoice.emission_date)
df = pd.DataFrame({'date': date})
df = df.sort_values(by='date')
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%Y-%m'))
sf = df.groupby('date').size()
df = pd.DataFrame({'date': sf.index, 'count': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%m/%Y'))
data = df.to_dict('list')
dfY = pd.DataFrame({'dateY': date})
dfY = dfY.sort_values(by='dateY')
dfY['dateY'] = pd.to_datetime(dfY['dateY']).apply(lambda x: x.strftime('%Y'))
sf = dfY.groupby('dateY').size()
dfY = pd.DataFrame({'dateY': sf.index, 'countY': sf.values})
data['dateY'] = dfY.to_dict('list')['dateY']
data['countY'] = dfY.to_dict('list')['countY']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_total_value_per_chosen_date(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
        df = pd.DataFrame({'date': sf.index, 'total': sf.values})
import os
import ast
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
from itertools import chain
from astropy.io import ascii
import multiprocessing as mp
from astropy.stats import mad_std
from astropy.timeseries import LombScargle as lomb
from pysyd import __file__
from pysyd.plots import set_plot_params
from pysyd.models import *
#####################################################################
# HIGHER-LEVEL FUNCTIONALITY OF THE SOFTWARE
#
def get_info(args):
"""
Loads todo.txt, sets up file paths, loads in any available star information, saves the
relevant parameters for each of the two main routines and sets the plotting parameters.
Parameters
----------
args : argparse.Namespace
command-line arguments
parallel : bool
if pysyd will be running in parallel mode
CLI : bool, optional
if CLI is not being used (i.e. `False`), the modules draw default values from a different location
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Get parameters for all modules
args = get_parameters(args)
# Get invidual/specific star info from csv file (if it exists)
args = get_csv_info(args)
if args.cli:
# Check the input variables
check_input_args(args)
args = get_command_line(args)
set_plot_params()
return args
def get_parameters(args):
"""
Basic function to call the individual functions that load and
save parameters for different modules.
Parameters
----------
args : argparse.Namespace
command-line arguments
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Initialize main 'params' dictionary
args = get_main_params(args)
args = get_groups(args)
# Initialize parameters for the find excess routine
args = get_excess_params(args)
# Initialize parameters for the fit background routine
args = get_background_params(args)
# Initialize parameters relevant for estimating global parameters
args = get_global_params(args)
return args
def get_main_params(args, cli=False, stars=None, excess=True, background=True, globe=True,
verbose=False, command='run', parallel=False, show=False, testing=False,
save=True, kep_corr=False, of_actual=None, of_new=None, overwrite=True):
"""
Get the parameters for the find excess routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
stars : List[str], optional
list of targets to process. If `None`, will read in from `info/todo.txt` (default).
verbose : bool, optional
turn on verbose output. Default is `False`.
show : bool, optional
show output figures. Default is `False`.
save : bool, optional
save all data products. Default is `True`.
kep_corr : bool, optional
use the module that corrects for known kepler artefacts. Default is `False`.
of_actual : int, optional
oversampling factor of input PS. Default value is `None`.
of_new : int, optional
oversampling factor of newly-computed PS. Default value is `None`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.params : Dict[str,object]
the parameters of higher-level functionality
"""
vars = ['stars', 'inpdir', 'outdir', 'cli', 'command', 'info', 'show', 'save', 'testing',
'overwrite', 'excess', 'background', 'global', 'verbose']
if args.cli:
vals = [args.stars, args.inpdir, args.outdir, args.cli, args.command, args.info,
args.show, args.save, args.testing, args.overwrite, args.excess, args.background,
args.globe, args.verbose]
else:
args.todo = os.path.join(os.path.abspath(os.getcwd()), 'info', 'todo.txt')
info = os.path.join(os.path.abspath(os.getcwd()), 'info', 'star_info.csv')
inpdir = os.path.join(os.path.abspath(os.getcwd()), 'data')
args.command, args.parallel, args.of_actual, args.of_new, args.kep_corr, args.verbose = command, parallel, of_actual, of_new, kep_corr, verbose
vals = [stars, inpdir, os.path.join(os.path.abspath(os.getcwd()), 'results'), cli, command,
info, show, save, testing, overwrite, excess, background, globe, verbose]
args.params = dict(zip(vars,vals))
# Open star list
if args.params['stars'] is None or args.params['stars'] == []:
with open(args.todo, "r") as f:
args.params['stars'] = [line.strip().split()[0] for line in f.readlines()]
# Set file paths and make directories if they don't yet exist
for star in args.params['stars']:
args.params[star] = {}
args.params[star]['path'] = os.path.join(args.params['outdir'], star)
if args.params['save'] and not os.path.exists(args.params[star]['path']):
os.makedirs(args.params[star]['path'])
args.params[star]['ech_mask'] = None
return args
#####################################################################
# Sets up star "groups" -> mostly for parallel processing
#
def get_groups(args):
"""
Sets up star groups to run in parallel based on the number of threads.
Parameters
----------
args : argparse.Namespace
command line arguments
parallel : bool
run pySYD in parallel
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.params['groups'] : ndarray
star groups to process (groups == number of threads)
"""
if args.parallel:
todo = np.array(args.params['stars'])
if args.n_threads == 0:
args.n_threads = mp.cpu_count()
if len(todo) < args.n_threads:
args.n_threads = len(todo)
# divide stars into groups set by number of cpus/nthreads available
digitized = np.digitize(np.arange(len(todo))%args.n_threads,np.arange(args.n_threads))
args.params['groups'] = np.array([todo[digitized == i] for i in range(1, args.n_threads+1)], dtype=object)
else:
args.params['groups'] = np.array(args.params['stars'])
return args
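# Worked example of the grouping above (hypothetical star list): with 5 stars in the todo
# list and n_threads=2, digitized = [1, 2, 1, 2, 1], so
# groups = [array(['star1', 'star3', 'star5']), array(['star2', 'star4'])].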
#####################################################################
# Parameters relevant to (optionally) estimate numax
#
def get_excess_params(args, n_trials=3, step=0.25, binning=0.005, smooth_width=20.0,
mode='mean', lower_ex=1.0, upper_ex=8000., ask=False,):
"""
Get the parameters for the find excess routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
ask : bool, optional
If `True`, it will ask which trial to use as the estimate for numax.
n_trials : int, optional
the number of trials. Default value is `3`.
step : float, optional
TODO: Write description. Default value is `0.25`.
binning : float, optional
logarithmic binning width. Default value is `0.005`.
mode : {'mean', 'median', 'gaussian'}
mode to use when binning
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.findex : Dict[str,object]
the parameters of the find excess routine
"""
vars = ['step', 'binning', 'mode', 'smooth_width', 'ask', 'n_trials', 'lower_ex', 'upper_ex', 'results']
if args.cli:
vals = [args.step, args.binning, args.mode, args.smooth_width, args.ask, args.n_trials, args.lower_ex, args.upper_ex, {}]
else:
vals = [step, binning, mode, smooth_width, ask, n_trials, lower_ex, upper_ex, {}]
args.excess = dict(zip(vars,vals))
return args
#####################################################################
# Parameters relevant to background-fitting
#
def get_background_params(args, ind_width=20.0, box_filter=1.0, n_rms=20, metric='bic', include=False,
mc_iter=1, samples=False, n_laws=None, fix_wn=False, basis='tau_sigma',
lower_bg=1.0, upper_bg=8000.,):
"""
Get the parameters for the background-fitting routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
box_filter : float
the size of the 1D box smoothing filter (in muHz). Default value is `1.0`.
ind_width : float
the independent average smoothing width (in muHz). Default value is `20.0`.
n_rms : int
number of data points to estimate red noise contributions. Default value is `20`.
metric : str
which metric to use (i.e. bic or aic) for model selection. Default is `'bic'`.
include : bool
include metric values in verbose output. Default is `False`.
basis : str
which basis to use for background fitting, e.g. {a,b} parametrization. Default is `tau_sigma`.
n_laws : int
force number of Harvey-like components in background fit. Default value is `None`.
fix_wn : bool
fix the white noise level in the background fit. Default is `False`.
mc_iter : int
number of samples used to estimate uncertainty. Default value is `1`.
samples : bool
if true, will save the monte carlo samples to a csv. Default value is `False`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.fitbg : Dict[str,object]
the parameters relevant for the fit background routine
"""
vars = ['ind_width', 'box_filter', 'n_rms', 'n_laws', 'fix_wn', 'basis', 'metric', 'include',
'functions', 'mc_iter', 'samples', 'lower_bg', 'upper_bg', 'results']
if args.cli:
vals = [args.ind_width, args.box_filter, args.n_rms, args.n_laws, args.fix_wn, args.basis,
args.metric, args.include, get_dict(type='functions'), args.mc_iter, args.samples,
args.lower_bg, args.upper_bg, {}]
else:
vals = [ind_width, box_filter, n_rms, n_laws, fix_wn, basis, metric, include,
get_dict(type='functions'), mc_iter, samples, lower_bg, upper_bg, {}]
args.background = dict(zip(vars,vals))
return args
#####################################################################
# Features related to determining numax and dnu
#
def get_global_params(args, sm_par=None, lower_ps=None, upper_ps=None, width=1.0,
method='D', smooth_ps=2.5, threshold=1.0, n_peaks=5, cmap='binary',
clip_value=3.0, smooth_ech=None, interp_ech=False, lower_ech=None,
upper_ech=None, nox=50, noy=0, notching=False):
"""
Get the parameters relevant for finding global asteroseismic parameters numax and dnu.
Parameters
----------
args : argparse.Namespace
the command line arguments
sm_par : float
Gaussian filter width for determining smoothed numax (values are typically between 1-4)
method : str
method to determine dnu, choices are ~['M','A','D'] (default is `'D'`).
lower_ps : float
lower bound of power excess (in muHz). Default value is `None`.
upper_ps : float
upper bound of power excess (in muHz). Default value is `None`.
width : float
fractional width to use for power excess centerd on numax. Default value is `1.0`.
smooth_ps : float
        box filter [in muHz] for PS smoothing before calculating ACF. Default value is `2.5`.
threshold : float
fractional width of FWHM to use in ACF for later iterations. Default value is `1.0`.
n_peaks : int
the number of peaks to select. Default value is `5`.
lower_ech : float
lower bound of folded PS (in muHz) to 'whiten' mixed modes. Default value is `None`.
upper_ech : float
upper bound of folded PS (in muHz) to 'whiten' mixed modes. Default value is `None`.
clip_value : float
        the minimum frequency of the echelle plot. Default value is `3.0`.
smooth_ech : float
option to smooth the output of the echelle plot
interp_ech : bool
turns on the bilinear smoothing in echelle plot
nox : int
x-axis resolution on the echelle diagram. Default value is `50`. (NOT CURRENTLY IMPLEMENTED YET)
noy : int
        how many radial orders to plot on the echelle diagram. Default value is `0`. (NOT CURRENTLY IMPLEMENTED YET)
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.globe : Dict[str,object]
the parameters relevant for determining the global parameters routine
"""
vars = ['sm_par', 'width', 'smooth_ps', 'threshold', 'n_peaks', 'method', 'cmap', 'clip_value',
'smooth_ech', 'interp_ech', 'nox', 'noy', 'notching', 'results']
if args.cli:
vals = [args.sm_par, args.width, args.smooth_ps, args.threshold, args.n_peaks, args.method, args.cmap,
args.clip_value, args.smooth_ech, args.interp_ech, args.nox, args.noy, args.notching, {}]
else:
        vals = [sm_par, width, smooth_ps, threshold, n_peaks, method, cmap, clip_value, smooth_ech,
interp_ech, nox, noy, notching, {}]
args.globe = dict(zip(vars,vals))
return args
#####################################################################
# Can store different settings for individual stars
#
def get_csv_info(args, force=False, guess=None):
"""
Reads in any star information provided via args.info and is 'info/star_info.csv' by default.
** Please note that this is NOT required for pySYD to run successfully **
Parameters
----------
args : argparse.Namespace
the command line arguments
force : float
if not false (i.e. non-zero) will force dnu to be the equal to this value.
guess : float
estimate or guess for dnu
Returns
-------
args : argparse.Namespace
the updated command line arguments
"""
constants = Constants()
columns = get_dict(type='columns')['required']
# Open file if it exists
if os.path.exists(args.info):
df = pd.read_csv(args.info)
stars = [str(each) for each in df.stars.values.tolist()]
for i, star in enumerate(args.params['stars']):
args.params[star]['excess'] = args.params['excess']
args.params[star]['force'] = force
args.params[star]['guess'] = guess
if star in stars:
idx = stars.index(star)
# Update information from columns
for column in columns:
if not np.isnan(float(df.loc[idx,column])):
args.params[star][column] = float(df.loc[idx, column])
else:
args.params[star][column] = None
# Add estimate of numax if the column exists
if args.params[star]['numax'] is not None:
args.params[star]['excess'] = False
args.params[star]['dnu'] = 0.22*(args.params[star]['numax']**0.797)
elif args.params[star]['dnu'] is not None:
args.params[star]['force'] = True
args.params[star]['guess'] = args.params[star]['dnu']
# Otherwise estimate using other stellar parameters
else:
if args.params[star]['radius'] is not None and args.params[star]['logg'] is not None:
args.params[star]['mass'] = ((((args.params[star]['radius']*constants.r_sun)**(2.0))*10**(args.params[star]['logg'])/constants.G)/constants.m_sun)
args.params[star]['numax'] = constants.numax_sun*args.params[star]['mass']*(args.params[star]['radius']**(-2.0))*((args.params[star]['teff']/constants.teff_sun)**(-0.5))
args.params[star]['dnu'] = constants.dnu_sun*(args.params[star]['mass']**(0.5))*(args.params[star]['radius']**(-1.5))
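                        # e.g. for a Sun-like input (radius=1, logg~4.44, teff=teff_sun) the mass
                        # works out to ~1 solar mass and these scaling relations return roughly
                        # the solar numax and dnu values stored in Constants.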
# if target isn't in csv, still save basic parameters called througout the code
else:
for column in columns:
args.params[star][column] = None
# same if the file does not exist
else:
for star in args.stars:
args.params[star]['excess'] = args.params['excess']
args.params[star]['force'] = False
for column in columns:
args.params[star][column] = None
return args
#####################################################################
# If running from command line, checks input types and array lengths
#
def check_input_args(args, max_laws=3):
"""
Make sure that any command-line inputs are the proper lengths, types, etc.
Parameters
----------
args : argparse.Namespace
the command line arguments
max_laws : int
maximum number of resolvable Harvey components
    Raises
    ------
    AssertionError
        if the length of any star-specific CLI input does not match the number of stars, or if
        an input that must be an integer is not
"""
checks={'lower_ps':args.lower_ps,'upper_ps':args.upper_ps,'lower_ech':args.lower_ech,
'upper_ech':args.upper_ech,'dnu':args.dnu,'numax':args.numax}
for check in checks:
if checks[check] is not None:
assert len(args.stars) == len(checks[check]), "The number of values provided for %s does not equal the number of stars"%check
if args.of_actual is not None:
assert isinstance(args.of_actual,int), "The oversampling factor for the input PS must be an integer"
if args.of_new is not None:
assert isinstance(args.of_new,int), "The new oversampling factor must be an integer"
if args.n_laws is not None:
assert args.n_laws <= max_laws, "We likely cannot resolve %d Harvey-like components for point sources. Please select a smaller number."%args.n_laws
def get_command_line(args, numax=None, dnu=None, lower_ps=None, upper_ps=None,
lower_ech=None, upper_ech=None):
"""
If certain CLI options are provided, it saves it to the appropriate star. This
is called after the csv is checked and therefore, this will override any duplicated
information provided there (if applicable).
Parameters
----------
args : argparse.Namespace
the command line arguments
args.lower_ps : float, optional
the lower frequency bound for numax (in muHz). Default is `None`.
args.upper_ps : float, optional
the upper frequency bound for numax (in muHz). Default is `None`.
args.numax : List[float], optional
the estimated numax (in muHz). Default is `None`.
args.dnu : List[float], optional
the estimated frequency spacing or dnu (in muHz). Default is `None`.
args.lower_ech : List[float], optional
the lower frequency for whitening the folded PS (in muHz). Default is `None`.
args.upper_ech : List[float], optional
the upper frequency for whitening the folded PS (in muHz). Default is `None`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
"""
override = {
'lower_ps': args.lower_ps,
'upper_ps': args.upper_ps,
'numax': args.numax,
'dnu': args.dnu,
'lower_ech': args.lower_ech,
'upper_ech': args.upper_ech,
}
for i, star in enumerate(args.params['stars']):
for each in override:
if override[each] is not None:
# if numax is provided via CLI, findex is skipped
if each == 'numax':
args.params[star]['excess'] = False
args.params[star]['numax'] = override[each][i]
args.params[star]['dnu'] = 0.22*(args.params[star]['numax']**0.797)
# if dnu is provided via CLI, this value is used instead of the derived dnu
elif each == 'dnu':
args.params[star]['force'] = True
args.params[star]['guess'] = override[each][i]
else:
args.params[star][each] = override[each][i]
if args.params[star]['lower_ech'] is not None and args.params[star]['upper_ech'] is not None:
args.params[star]['ech_mask'] = [args.params[star]['lower_ech'],args.params[star]['upper_ech']]
else:
args.params[star]['ech_mask'] = None
return args
#####################################################################
# Data and information related to a processed star
#
def load_data(star, args):
"""
Loads both the light curve and power spectrum data in for a given star,
which will return `False` if unsuccessful and therefore, not run the rest
of the pipeline.
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
Returns
-------
star : target.Target
the pySYD pipeline object
star.lc : bool
will return `True` if the light curve data was loaded in properly otherwise `False`
star.ps : bool
will return `True` if the power spectrum file was successfully loaded otherwise `False`
"""
if not star.params['cli']:
star.pickles=[]
# Now done at beginning to make sure it only does this once per star
if glob.glob(os.path.join(args.inpdir,'%s*'%str(star.name))) != []:
if star.verbose:
print('\n\n------------------------------------------------------')
print('Target: %s'%str(star.name))
print('------------------------------------------------------')
# Load light curve
args, star, note = load_time_series(args, star)
if star.verbose:
print(note)
# Load power spectrum
args, star, note = load_power_spectrum(args, star)
if star.verbose:
print(note)
return star
def load_file(path):
"""
Load a light curve or a power spectrum from a basic 2xN txt file
and stores the data into the `x` (independent variable) and `y`
(dependent variable) arrays, where N is the length of the series.
Parameters
----------
path : str
the file path of the data file
Returns
-------
x : numpy.array
the independent variable i.e. the time or frequency array
y : numpy.array
the dependent variable, in this case either the flux or power array
"""
f = open(path, "r")
lines = f.readlines()
f.close()
# Set values
x = np.array([float(line.strip().split()[0]) for line in lines])
y = np.array([float(line.strip().split()[1]) for line in lines])
return x, y
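# Expected file layout (a sketch; values made up): plain text with two whitespace-separated
# columns per line, e.g. time [days] & flux for '<star>_LC.txt' or frequency [muHz] & power
# for '<star>_PS.txt':
#
#   54953.538   1.00012
#   54953.558   0.99987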
def load_time_series(args, star, note=''):
"""
If available, star.lc is set to `True`, the time series data
is loaded in, and then it calculates the cadence and nyquist
freqency. If time series data is not provided, either the
cadence or nyquist frequency must be provided via CLI
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
args.cadence : int
cadence of time series data (if known but data is not available)
args.nyquist : float
nyquist frequency of the provided power spectrum
note : str
optional suppressed verbose output
Returns
-------
star : target.Target
the pySYD pipeline object
star.lc : bool
will return `True` if the light curve data was loaded in properly otherwise `False`
star.time : numpy.array
time array in days
star.flux : numpy.array
relative or normalized flux array
"""
star.lc = False
star.nyquist = None
# Try loading the light curve
if os.path.exists(os.path.join(args.inpdir, '%s_LC.txt'%star.name)):
star.lc = True
star.time, star.flux = load_file(os.path.join(args.inpdir, '%s_LC.txt'%star.name))
star.time -= min(star.time)
star.cadence = int(round(np.nanmedian(np.diff(star.time)*24.0*60.0*60.0),0))
star.nyquist = 10**6./(2.0*star.cadence)
star.baseline = (max(star.time)-min(star.time))*24.*60.*60.
star.tau_upper = star.baseline/2.
note += '# LIGHT CURVE: %d lines of data read\n# Time series cadence: %d seconds'%(len(star.time),star.cadence)
return args, star, note
def load_power_spectrum(args, star, note='', long=10**6):
"""
Loads in the power spectrum data in for a given star,
which will return `False` if unsuccessful and therefore, not run the rest
of the pipeline.
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
args.kep_corr : bool
if true, will run the module to mitigate the Kepler artefacts in the power spectrum. Default is `False`.
args.of_actual : int
the oversampling factor, if the power spectrum is already oversampled. Default is `1`, assuming a critically sampled PS.
args.of_new : float
the oversampling factor to use for the first iterations. Default is `5`.
note : str
optional suppressed verbose output
long : int
will display a warning if length of PS is longer than 10**6 lines
Returns
-------
star : target.Target
the pySYD pipeline object
star.ps : bool
will return `True` if the power spectrum file was successfully loaded otherwise `False`
star.frequency : numpy.array
frequency array in muHz
star.power : numpy.array
power spectral density array
"""
star.ps = False
# Try loading the power spectrum
if not os.path.exists(os.path.join(args.inpdir, '%s_PS.txt'%star.name)):
note += '# ERROR: %s/%s_PS.txt not found\n'%(args.inpdir, star.name)
else:
star.ps = True
star.frequency, star.power = load_file(os.path.join(args.inpdir, '%s_PS.txt'%star.name))
note += '# POWER SPECTRUM: %d lines of data read\n'%len(star.frequency)
if len(star.frequency) >= long:
note += '# WARNING: PS is large and will slow down the software'
star.resolution = star.frequency[1]-star.frequency[0]
if args.kep_corr:
note += '# **using Kepler artefact correction**\n'
star = remove_artefact(star)
if star.params[star.name]['ech_mask'] is not None:
note += '# **whitening the PS to remove mixed modes**\n'
star = whiten_mixed(star)
args, star, note = check_input_data(args, star, note)
return args, star, note
#####################################################################
# Relevant for Kepler (artefact) correction function
# -> this will save the seed for reproducibiity purposes
#
def set_seed(star, lower=1, upper=10**7, size=1):
"""
For Kepler targets that require a correction via CLI (--kc), a random seed is generated
from U~[1,10^7] and stored in stars_info.csv for reproducible results in later runs.
Parameters
----------
star : target.Target
the pySYD pipeline object
lower : int
lower limit for random seed value. Default value is `1`.
upper : int
upper limit for random seed value. Default value is `10**7`.
size : int
number of seed values returned. Default value is `1`.
Returns
-------
star : target.Target
the pySYD pipeline object
"""
seed = list(np.random.randint(lower,high=upper,size=size))
df = pd.read_csv(star.params['info'])
stars = [str(each) for each in df.stars.values.tolist()]
idx = stars.index(star.name)
df.loc[idx,'seed'] = int(seed[0])
star.params[star.name]['seed'] = seed[0]
df.to_csv(star.params['info'],index=False)
return star
#####################################################################
# Routine to correct for 1/LC Kepler harmonics, as well as
# known high frequency artefacts and the low frequency artefacts
# (primarily in Q0-Q3 data)
#
def remove_artefact(star, lcp=1.0/(29.4244*60*1e-6), lf_lower=[240.0,500.0], lf_upper =[380.0,530.0],
hf_lower = [4530.0,5011.0,5097.0,5575.0,7020.0,7440.0,7864.0],
hf_upper = [4534.0,5020.0,5099.0,5585.0,7030.0,7450.0,7867.0],):
"""
Removes SC artefacts in Kepler power spectra by replacing them with noise (using linear interpolation)
following a chi-squared distribution.
Known artefacts are:
1) 1./LC harmonics
2) high frequency artefacts (>5000 muHz)
3) low frequency artefacts 250-400 muHz (mostly present in Q0 and Q3 data)
Parameters
----------
star : target.Target
the pySYD pipeline object
lcp : float
long cadence period in Msec
lf_lower : List[float]
lower limit of low frequency artefact
lf_upper : List[float]
upper limit of low frequency artefact
hf_lower : List[float]
lower limit of high frequency artefact
hf_upper : List[float]
upper limit of high frequency artefact
Returns
-------
star : target.Target
the pySYD pipeline object
"""
if star.params[star.name]['seed'] is None:
star = set_seed(star)
# LC period in Msec -> 1/LC ~muHz
artefact = (1.0+np.arange(14))*lcp
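    # With the long-cadence period above (29.4244 min), lcp ~ 566.4 muHz, so the harmonics
    # removed below sit near ~566.4, 1132.8, ..., 7929.9 muHz (14*lcp).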
# Estimate white noise
white = np.mean(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
np.random.seed(int(star.params[star.name]['seed']))
# Routine 1: remove 1/LC artefacts by subtracting +/- 5 muHz given each artefact
for i in range(len(artefact)):
if artefact[i] < np.max(star.frequency):
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, artefact[i]-5.0*star.resolution, artefact[i]+5.0*star.resolution))
if np.sum(mask) != 0:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
np.random.seed(int(star.params[star.name]['seed']))
# Routine 2: fix high frequency artefacts
for lower, upper in zip(hf_lower, hf_upper):
if lower < np.max(star.frequency):
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, lower, upper))
if np.sum(mask) != 0:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
np.random.seed(int(star.params[star.name]['seed']))
# Routine 3: remove wider, low frequency artefacts
for lower, upper in zip(lf_lower, lf_upper):
low = np.ma.getmask(np.ma.masked_outside(star.frequency, lower-20., lower))
upp = np.ma.getmask(np.ma.masked_outside(star.frequency, upper, upper+20.))
# Coeffs for linear fit
m, b = np.polyfit(star.frequency[~(low*upp)], star.power[~(low*upp)], 1)
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, lower, upper))
# Fill artefact frequencies with noise
star.power[mask] = ((star.frequency[mask]*m)+b)*(np.random.chisquare(2, np.sum(mask))/2.0)
return star
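# Editorial note (not part of the original module): the replacement power above is
# drawn as white*chisquare(2)/2, whose expectation equals the local white-noise
# level since E[chi^2_2] = 2.  A minimal illustration of the draw:
def _example_chi2_replacement(white, size, seed=1):
    """Draw `size` replacement powers with mean ~ `white` (illustrative only)."""
    np.random.seed(seed)
    return white*np.random.chisquare(2, size)/2.0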
#####################################################################
# For subgiants with mixed modes, this will "whiten" these modes
# by adding noise (by drawing from a chi-squared distribution with 2
# dof) to properly estimate dnu.
#
def whiten_mixed(star):
"""
Generates random white noise in place of ell=1 for subgiants with mixed modes to better
constrain the characteristic frequency spacing.
Parameters
----------
star : target.Target
pySYD pipeline target
star.frequency : np.ndarray
the frequency of the power spectrum
star.power : np.ndarray
the power spectrum
"""
if star.params[star.name]['seed'] is None:
star = set_seed(star)
# Estimate white noise
if not star.globe['notching']:
white = np.mean(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
else:
white = min(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
# Take the provided dnu and "fold" the power spectrum
folded_freq = np.copy(star.frequency)%star.params[star.name]['guess']
mask = np.ma.getmask(np.ma.masked_inside(folded_freq, star.params[star.name]['ech_mask'][0], star.params[star.name]['ech_mask'][1]))
np.random.seed(int(star.params[star.name]['seed']))
# Makes sure the mask is not empty
if np.sum(mask) != 0:
if star.globe['notching']:
star.power[mask] = white
else:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
# Typically if dnu is provided, it will assume you want to "force" that value
# so we need to adjust this back
star.params[star.name]['force'] = False
star.params[star.name]['guess'] = None
return star
def check_input_data(args, star, note):
"""
Checks the type(s) of input data and creates any additional, optional
arrays as well as critically-sampled power spectra (when applicable).
Parameters
----------
args : argparse.Namespace
command line arguments
star : target.Target
pySYD target object
note : str, optional
optional verbose output
Returns
-------
args : argparse.Namespace
updated command line arguments
star : target.Target
updated pySYD target object
note : str, optional
updated optional verbose output
"""
if star.lc:
args.of_actual = int(round((1./((max(star.time)-min(star.time))*0.0864))/(star.frequency[1]-star.frequency[0])))
star.freq_cs = np.array(star.frequency[args.of_actual-1::args.of_actual])
star.pow_cs = np.array(star.power[args.of_actual-1::args.of_actual])
if args.of_new is not None:
note += '# Computing new PS using oversampling of %d\n'%args.of_new
freq_os, pow_os = lomb(star.time, star.flux).autopower(method='fast', samples_per_peak=args.of_new, maximum_frequency=star.nyquist)
star.freq_os = freq_os*(10.**6/(24.*60.*60.))
star.pow_os = 4.*pow_os*np.var(star.flux*1e6)/(np.sum(pow_os)*(star.freq_os[1]-star.freq_os[0]))
else:
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
else:
if args.of_actual is not None:
star.freq_cs = np.array(star.frequency[args.of_actual-1::args.of_actual])
star.pow_cs = np.array(star.power[args.of_actual-1::args.of_actual])
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
else:
star.freq_cs, star.pow_cs = np.copy(star.frequency), np.copy(star.power)
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
note += '# WARNING: using input PS with no additional information\n'
if args.mc_iter > 1:
                note += '# **uncertainties may not be reliable unless using a critically-sampled PS**\n'
star.baseline = 1./((star.freq_cs[1]-star.freq_cs[0])*10**-6.)
star.tau_upper = star.baseline/2.
if args.of_actual is not None and args.of_actual != 1:
note += '# PS is oversampled by a factor of %d\n'%args.of_actual
else:
note += '# PS is critically-sampled\n'
note += '# PS resolution: %.6f muHz'%(star.freq_cs[1]-star.freq_cs[0])
return args, star, note
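# Worked example (editorial): with `star.time` in days, a 90-day light curve spans
# 90*0.0864 = 7.776 Msec, giving a natural frequency resolution of 1/7.776 ~ 0.1286 muHz.
# If the input power spectrum has a bin width of 0.0257 muHz, the inferred oversampling
# factor is round(0.1286/0.0257) = 5 and the critically-sampled PS keeps every 5th bin.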
#####################################################################
# Sets data up for first optional module
#
def get_estimates(star, max_trials=6):
"""
    Parameters used with the first module, which is an automated method to identify
    power excess due to solar-like oscillations.
Parameters
----------
star : target.Target
pySYD target object
Returns
-------
star : target.Target
updated pySYD target object
"""
# If running the first module, mask out any unwanted frequency regions
star.frequency, star.power = np.copy(star.freq_os), np.copy(star.pow_os)
star.resolution = star.frequency[1]-star.frequency[0]
# mask out any unwanted frequencies
if star.params[star.name]['lower_ex'] is not None:
lower = star.params[star.name]['lower_ex']
else:
if star.excess['lower_ex'] is not None:
lower = star.excess['lower_ex']
else:
lower = min(star.frequency)
if star.params[star.name]['upper_ex'] is not None:
upper = star.params[star.name]['upper_ex']
else:
if star.excess['upper_ex'] is not None:
upper = star.excess['upper_ex']
else:
upper = max(star.frequency)
if star.nyquist is not None and star.nyquist < upper:
upper = star.nyquist
star.freq = star.frequency[(star.frequency >= lower)&(star.frequency <= upper)]
star.pow = star.power[(star.frequency >= lower)&(star.frequency <= upper)]
if star.excess['n_trials'] > max_trials:
star.excess['n_trials'] = max_trials
if (star.params[star.name]['numax'] is not None and star.params[star.name]['numax'] <= 500.) or (star.nyquist is not None and star.nyquist <= 300.):
star.boxes = np.logspace(np.log10(0.5), np.log10(25.), star.excess['n_trials'])
else:
star.boxes = np.logspace(np.log10(50.), np.log10(500.), star.excess['n_trials'])
return star
#####################################################################
# Checks if there's an estimate for numax (but no longer requires it)
# Still needs to be tested: no estimate of numax but global fit
#
def check_numax(star):
"""
    Checks if there is a starting value for numax, as pySYD needs this information to begin the
    second module (whether it comes from the first module, the CLI, or a value saved in info/star_info.csv).
Returns
-------
result : bool
        will return `True` if there is a prior value for numax, otherwise `False`.
"""
# THIS MUST BE FIXED TOO
# Check if numax was provided as input
if star.params[star.name]['numax'] is None:
# If not, checks if findex was run
if not star.params['overwrite']:
dir = os.path.join(star.params[star.name]['path'],'estimates*')
else:
dir = os.path.join(star.params[star.name]['path'],'estimates.csv')
if glob.glob(dir) != []:
if not star.params['overwrite']:
list_of_files = glob.glob(os.path.join(star.params[star.name]['path'],'estimates*'))
file = max(list_of_files, key=os.path.getctime)
else:
file = os.path.join(star.params[star.name]['path'],'estimates.csv')
df = pd.read_csv(file)
for col in ['numax', 'dnu', 'snr']:
star.params[star.name][col] = df.loc[0, col]
# No estimate for numax provided and/or determined
else:
return False
return True
#####################################################################
# Sets data up for the derivation of asteroseismic parameters
#
def get_initial(star, lower_bg=1.0):
"""
Gets initial guesses for granulation components (i.e. timescales and amplitudes) using
solar scaling relations. This resets the power spectrum and has its own independent
filter (i.e. [lower,upper] mask) to use for this subroutine.
Parameters
----------
star : target.Target
pySYD target object
star.oversample : bool
if `True`, it will use an oversampled power spectrum for the first iteration or 'step'
minimum_freq : float
minimum frequency to use for the power spectrum if `None` is provided (via info/star_info.csv). Default = `10.0` muHz. Please note: this is typically sufficient for most stars but may affect evolved stars!
maximum_freq : float
maximum frequency to use for the power spectrum if `None` is provided (via info/star_info.csv). Default = `5000.0` muHz.
Returns
-------
star : target.Target
updated pySYD target object
"""
star.frequency, star.power = np.copy(star.freq_os), np.copy(star.pow_os)
star.resolution = star.frequency[1]-star.frequency[0]
if star.params[star.name]['lower_bg'] is not None:
lower = star.params[star.name]['lower_bg']
else:
lower = lower_bg
if star.params[star.name]['upper_bg'] is not None:
upper = star.params[star.name]['upper_bg']
else:
upper = max(star.frequency)
if star.nyquist is not None and star.nyquist < upper:
upper = star.nyquist
star.params[star.name]['bg_mask']=[lower,upper]
# Mask power spectrum for fitbg module
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, star.params[star.name]['bg_mask'][0], star.params[star.name]['bg_mask'][1]))
star.frequency, star.power = np.copy(star.frequency[mask]), np.copy(star.power[mask])
star.random_pow = np.copy(star.power)
# Get other relevant initial conditions
star.i = 0
if star.params['background']:
star.background['results'][star.name] = {}
if star.params['global']:
star.globe['results'][star.name] = {}
star.globe['results'][star.name] = {'numax_smooth':[],'A_smooth':[],'numax_gauss':[],'A_gauss':[],'FWHM':[],'dnu':[]}
if star.params['testing']:
star.test='----------------------------------------------------\n\nTESTING INFORMATION:\n'
# Use scaling relations from sun to get starting points
star = solar_scaling(star)
return star
#####################################################################
# We use scaling relations to estimate initial guesses for
# several parameters
#
def solar_scaling(star, scaling='tau_sun_single', max_laws=3, times=1.5, scale=1.0):
"""
Uses scaling relations from the Sun to:
1) estimate the width of the region of oscillations using numax
2) guess starting values for granulation timescales
Parameters
----------
max_laws : int
the maximum number of resolvable Harvey-like components
"""
constants = Constants()
# Checks if there's an estimate for numax
    # Note: "excess" now means "is there a power excess?" - it is True by default
    # (i.e. pySYD will search for one); if False, the star is assumed to have none.
if check_numax(star):
star.exp_numax = star.params[star.name]['numax']
# Use scaling relations to estimate width of oscillation region to mask out of the background fit
width = constants.width_sun*(star.exp_numax/constants.numax_sun)
maxpower = [star.exp_numax-(width*star.globe['width']), star.exp_numax+(width*star.globe['width'])]
if star.params[star.name]['lower_ps'] is not None:
maxpower[0] = star.params[star.name]['lower_ps']
if star.params[star.name]['upper_ps'] is not None:
maxpower[1] = star.params[star.name]['upper_ps']
star.params[star.name]['ps_mask'] = [maxpower[0],maxpower[1]]
# Use scaling relation for granulation timescales from the sun to get starting points
scale = constants.numax_sun/star.exp_numax
# If not, uses entire power spectrum
else:
maxpower = [np.median(star.frequency), np.median(star.frequency)]
if star.params[star.name]['lower_ps'] is not None:
maxpower[0] = star.params[star.name]['lower_ps']
if star.params[star.name]['upper_ps'] is not None:
maxpower[1] = star.params[star.name]['upper_ps']
star.params[star.name]['ps_mask'] = [maxpower[0],maxpower[1]]
# Estimate granulation time scales
if scaling == 'tau_sun_single':
taus = np.array(constants.tau_sun_single)*scale
else:
taus = np.array(constants.tau_sun)*scale
taus = taus[taus <= star.baseline]
b = taus*10**-6.
mnu = (1.0/taus)*10**5.
star.b = b[mnu >= min(star.frequency)]
star.mnu = mnu[mnu >= min(star.frequency)]
if len(star.mnu)==0:
star.b = b[mnu >= 10.]
star.mnu = mnu[mnu >= 10.]
elif len(star.mnu) > max_laws:
star.b = b[mnu >= min(star.frequency)][-max_laws:]
star.mnu = mnu[mnu >= min(star.frequency)][-max_laws:]
else:
pass
# Save copies for plotting after the analysis
star.nlaws = len(star.mnu)
star.nlaws_orig = len(star.mnu)
star.mnu_orig = np.copy(star.mnu)
star.b_orig = np.copy(star.b)
return star
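# Worked example (editorial; the actual numbers come from Constants()): assuming
# width_sun ~ 1300 muHz and numax_sun ~ 3090 muHz for illustration, a star with
# numax = 100 muHz gets width = 1300*(100/3090) ~ 42 muHz, so with
# star.globe['width'] = 1.0 the masked oscillation region is ps_mask ~ [58, 142] muHz
# and the solar granulation timescales are rescaled by numax_sun/numax ~ 31.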
#####################################################################
# Save information
#
def save_file(star, formats=[">15.8f", ">18.10e"]):
"""
Saves the corrected power spectrum, which is computed by subtracting
the best-fit stellar background model from the power spectrum.
Parameters
----------
star : target.Target
the pySYD pipeline target
formats : List[str]
2x1 list of formats to save arrays as
star.params[star.name]['path'] : str
path to save the background-corrected power spectrum
star.frequency : ndarray
frequency array
    star.bg_corr : ndarray
background-subtracted power spectrum
"""
f_name = os.path.join(star.params[star.name]['path'],'bgcorr_ps.txt')
if not star.params['overwrite']:
f_name = get_next(star,'bgcorr_ps.txt')
with open(f_name, "w") as f:
for x, y in zip(star.frequency, star.bg_corr):
values = [x, y]
text = '{:{}}'*len(values) + '\n'
fmt = sum(zip(values, formats), ())
f.write(text.format(*fmt))
if star.verbose:
print(' **background-corrected PS saved**')
def save_estimates(star):
"""
Save the results of the find excess routine into the save folder of the current star.
Parameters
----------
star : target.Target
pipeline target with the results of the `find_excess` routine
"""
best = star.excess['results'][star.name]['best']
variables = ['star', 'numax', 'dnu', 'snr']
results = [star.name, star.excess['results'][star.name][best]['numax'], star.excess['results'][star.name][best]['dnu'], star.excess['results'][star.name][best]['snr']]
save_path = os.path.join(star.params[star.name]['path'],'estimates.csv')
if not star.params['overwrite']:
save_path = get_next(star,'estimates.csv')
ascii.write(np.array(results), save_path, names=variables, delimiter=',', overwrite=True)
def save_results(star):
"""
Saves the results of the `fit_background` module.
Parameters
----------
star : target.Target
pipeline target with the results of the `fit_background` routine
"""
results={}
if star.params['background']:
results.update(star.background['results'][star.name])
if star.params['global']:
results.update(star.globe['results'][star.name])
df = pd.DataFrame(results)
star.df = df.copy()
new_df = pd.DataFrame(columns=['parameter', 'value', 'uncertainty'])
for c, col in enumerate(df.columns.values.tolist()):
new_df.loc[c, 'parameter'] = col
new_df.loc[c, 'value'] = df.loc[0,col]
if star.background['mc_iter'] > 1:
new_df.loc[c, 'uncertainty'] = mad_std(df[col].values)
else:
new_df.loc[c, 'uncertainty'] = '--'
if not star.params['overwrite']:
new_df.to_csv(get_next(star,'global.csv'), index=False)
else:
new_df.to_csv(os.path.join(star.params[star.name]['path'],'global.csv'), index=False)
if star.background['samples']:
df.to_csv(os.path.join(star.params[star.name]['path'],'samples.csv'), index=False)
#####################################################################
# Optional verbose output function
#
def verbose_output(star):
"""
If `True`, prints results from the global asteroseismic fit.
"""
note=''
params = get_dict()
if not star.params['overwrite']:
list_of_files = glob.glob(os.path.join(star.params[star.name]['path'],'global*'))
file = max(list_of_files, key=os.path.getctime)
else:
file = os.path.join(star.params[star.name]['path'],'global.csv')
df = pd.read_csv(file)
if star.background['mc_iter'] > 1:
note+='\nOutput parameters:'
line='\n%s: %.2f +/- %.2f %s'
for idx in df.index.values.tolist():
note+=line%(df.loc[idx,'parameter'],df.loc[idx,'value'],df.loc[idx,'uncertainty'],params[df.loc[idx,'parameter']]['unit'])
else:
note+='------------------------------------------------------\nOutput parameters:'
line='\n%s: %.2f %s'
for idx in df.index.values.tolist():
note+=line%(df.loc[idx,'parameter'],df.loc[idx,'value'],params[df.loc[idx,'parameter']]['unit'])
note+='\n------------------------------------------------------'
print(note)
#####################################################################
# Concatenates data for individual stars into a single csv
#
def scrape_output(args):
"""
    Grabs each individual star's results and concatenates them into a single csv in info/ for each submodule
    (i.e. excess.csv and background.csv). This is automatically called if pySYD is successfully executed for
    at least one star.
"""
path = os.path.join(args.params['outdir'],'**','')
# Findex outputs
files = glob.glob('%s*estimates.csv'%path)
if files != []:
df = | pd.read_csv(files[0]) | pandas.read_csv |
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
def get_data_RFE():
    # build the train/test split used to fit the RFE models for variable selection
trainset = pd.read_csv('TrainingData.csv')
lb = preprocessing.LabelBinarizer()
wi=lb.fit_transform(np.array(trainset.loc[:,['Working Ion']]))
cs=lb.fit_transform(np.array(trainset.loc[:,['Crystal System']]))
sn=lb.fit_transform(np.array(trainset.loc[:,['Spacegroup Number']]))
el=np.array(trainset.loc[:,['mean_Number', 'mean_MendeleevNumber',
'mean_AtomicWeight', 'mean_MeltingT', 'mean_Column', 'mean_Row',
'mean_CovalentRadius', 'mean_Electronegativity', 'mean_NsValence',
'mean_NpValence', 'mean_NdValence', 'mean_NfValence', 'mean_NValance',
'mean_NsUnfilled', 'mean_NpUnfilled', 'mean_NdUnfilled',
'mean_NfUnfilled', 'mean_NUnfilled', 'mean_GSvolume_pa',
'mean_GSbandgap', 'mean_GSmagmom', 'mean_SpaceGroupNumber',
'dev_Number', 'dev_MendeleevNumber', 'dev_AtomicWeight', 'dev_MeltingT',
'dev_Column', 'dev_Row', 'dev_CovalentRadius', 'dev_Electronegativity',
'dev_NsValence', 'dev_NpValence', 'dev_NdValence', 'dev_NfValence',
'dev_NValance', 'dev_NsUnfilled', 'dev_NpUnfilled', 'dev_NdUnfilled',
'dev_NfUnfilled', 'dev_NUnfilled', 'dev_GSvolume_pa', 'dev_GSbandgap',
'dev_GSmagmom', 'dev_SpaceGroupNumber', 'mean_Number.1',
'mean_MendeleevNumber.1', 'mean_AtomicWeight.1', 'mean_MeltingT.1',
'mean_Column.1', 'mean_Row.1', 'mean_CovalentRadius.1',
'mean_Electronegativity.1', 'mean_NsValence.1', 'mean_NpValence.1',
'mean_NdValence.1', 'mean_NfValence.1', 'mean_NValance.1',
'mean_NsUnfilled.1', 'mean_NpUnfilled.1', 'mean_NdUnfilled.1',
'mean_NfUnfilled.1', 'mean_NUnfilled.1', 'mean_GSvolume_pa.1',
'mean_GSbandgap.1', 'mean_GSmagmom.1', 'mean_SpaceGroupNumber.1',
'dev_Number.1', 'dev_MendeleevNumber.1', 'dev_AtomicWeight.1',
'dev_MeltingT.1', 'dev_Column.1', 'dev_Row.1', 'dev_CovalentRadius.1',
'dev_Electronegativity.1', 'dev_NsValence.1', 'dev_NpValence.1',
'dev_NdValence.1', 'dev_NfValence.1', 'dev_NValance.1',
'dev_NsUnfilled.1', 'dev_NpUnfilled.1', 'dev_NdUnfilled.1',
'dev_NfUnfilled.1', 'dev_NUnfilled.1', 'dev_GSvolume_pa.1',
'dev_GSbandgap.1', 'dev_GSmagmom.1', 'dev_SpaceGroupNumber.1']])
prop=np.hstack((wi, cs, sn, el))
ss = StandardScaler()
pss = ss.fit_transform(prop)
standard_data = pd.DataFrame(pss)
outputs=pd.read_csv('NEWTrainingData_StandardScaler.csv').loc[:,['Gravimetric Capacity (units)', 'Volumetric Capacity', 'Max Delta Volume']]
X_train,X_test, y_train, y_test =train_test_split(standard_data,outputs,test_size=0.2, random_state=0)
return X_train,X_test, y_train, y_test
def SVR_linear_RFE(X_train,X_test, y_train, y_test):
#http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html#sklearn.feature_selection.RFE
# a coef_ attribute or a feature_importances_ attribute
# get a dataframe for Gravimetric Capacity (units) after variable selection
select1 = RFE(SVR(kernel='linear', degree=3, gamma='scale', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, \
shrinking=True, cache_size=200, verbose=False, max_iter=-1), n_features_to_select=208)
GC_df = pd.DataFrame(select1.fit_transform(X_train, y_train['Gravimetric Capacity (units)']))
# get a dataframe for Volumetric Capacity after variable selection
select2 = RFE(SVR(kernel='linear', degree=3, gamma='scale', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, \
shrinking=True, cache_size=200, verbose=False, max_iter=-1), n_features_to_select=207)
VC_df = pd.DataFrame(select2.fit_transform(X_train, y_train['Volumetric Capacity']))
# get a dataframe for Max Delta Volume after variable selection
select3 = RFE(SVR(kernel='linear', degree=3, gamma='scale', coef0=0.0, tol=0.001, C=1.0, epsilon=0.1, \
shrinking=True, cache_size=200, verbose=False, max_iter=-1), n_features_to_select=205)
MDV_df = pd.DataFrame(select3.fit_transform(X_train, y_train['Max Delta Volume']))
return GC_df, VC_df, MDV_df
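# Illustrative end-to-end usage (editorial sketch; assumes TrainingData.csv and
# NEWTrainingData_StandardScaler.csv exist in the working directory):
#
#   X_train, X_test, y_train, y_test = get_data_RFE()
#   GC_df, VC_df, MDV_df = SVR_linear_RFE(X_train, X_test, y_train, y_test)
#   PCA_get_CSV(GC_df, VC_df, MDV_df)   # writes the three PCA-reduced CSVs (defined below)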
def PCA_get_CSV(GC_df, VC_df, MDV_df):
# get a .csv file for Gravimetric Capacity (units) after PCA
pca = PCA(n_components=133)
newdata1=pca.fit_transform(GC_df)
newdf1 = pd.DataFrame(newdata1)
newdf1.to_csv('./Data/Data for svr/GC_CPA.csv')
# get a .csv file for Volumetric Capacity after PCA
pca = PCA(n_components=134)
newdata2=pca.fit_transform(VC_df)
newdf2 = | pd.DataFrame(newdata2) | pandas.DataFrame |
"""
usage: parking_utilisation.py [-h] --park PARK [--pfile PFILE]
[--dbname DBNAME] [--dbhost DBHOST]
[--dbuser DBUSER] --dbpwd DBPWD
[--veh_type VEHT] [--granular G]
Script to plot parking utilisation by time of day.
optional arguments:
-h, --help show this help message and exit
--park PARK Parking Stored Procedure (default: None)
--pfile PFILE Path of parkingInfo.csv (default: parkingInfo.csv)
--dbname DBNAME Database Name Containing DAS (default: simmobility_l2nic2b)
--dbhost DBHOST Database IP Address (default: 172.25.184.156)
--dbuser DBUSER Database Username (default: postgres)
--dbpwd DBPWD Database Password (default: <PASSWORD>)
--veh_type VEHT Vehicle Type Table (default: supply2.vehicle_type)
--granular G Granularity of plot (Points are plotted for every G
minutes.) (default: 5)
Author: <NAME>
Date: 06.09.2018
Output:
1) parking_utilisation.png: The plot
2) parking_utilisation.csv: The underlying data
"""
import pandas as pd
import numpy as np
import sys
import csv
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.markers as mkr
import psycopg2
import argparse
from argparse import ArgumentParser
########################
## INPUT PARAMETERS
########################
class HelpParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = HelpParser(description="Script to plot parking utilisation by time of day.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--park",dest="park", type=str, required=True,
help="Parking Stored Procedure")
parser.add_argument("--pfile",dest="pfile", type=str, default="parkingInfo.csv",
help="Path of parkingInfo.csv")
parser.add_argument("--dbname",dest="dbname", type=str, default="simmobility_l2nic2b",
help="Database Name Containing DAS")
parser.add_argument("--dbhost",dest="dbhost",type=str, default="172.25.184.156",
help="Database IP Address")
parser.add_argument("--dbuser",dest="dbuser", type=str, default="postgres",
help="Database Username")
parser.add_argument("--dbpwd",dest="dbpwd", type=str, required=True,
help="Database Password")
parser.add_argument("--veh_type",dest="veht", type=str, default="supply2.vehicle_type",
help="Vehicle Type Table")
parser.add_argument("--granular",dest="g",type=int, default=5,
help="Granularity of plot (Points are plotted for every G minutes.)")
options = parser.parse_args()
##############################
## HELPER FUNCTIONS
##############################
def ceilTime(dt):
    new_dt = datetime.datetime(dt.year, dt.month, dt.day, dt.hour, options.g*(dt.minute // options.g))
if (dt.minute % options.g == 0 and dt.second == 0):
return (new_dt)
else:
return (new_dt + datetime.timedelta(minutes=options.g))
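# Example (editorial): with --granular 5, ceilTime rounds timestamps up to the next
# 5-minute boundary, e.g. 08:07:30 -> 08:10:00, while an already-aligned 08:05:00 is
# returned unchanged.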
##############################
## LOAD PARKINGINFO STATISTICS FOR EACH vehicle id type
##############################
timeLimits = [None,None];
parked = []; isParkedL = lambda l : l[7] == "PARKED";
exited = []; isExitedL = lambda l : (l[7] == "EXIT_PARKING" or l[7] == "SHIFT_END");
with open(options.pfile, 'rt') as file:
reader = csv.reader(file)
for line in reader:
curTime = pd.to_datetime(line[3], format="%H:%M:%S")
if (timeLimits[0] is None or timeLimits[0] > curTime):
timeLimits[0] = curTime
if (timeLimits[1] is None or timeLimits[1] < curTime):
timeLimits[1] = curTime
if isParkedL(line):
parked.append({
"time" : curTime,
"parking_id" : line[4],
"veh_type_id" : line[1]
})
elif isExitedL(line):
exited.append({
"time" : curTime,
"parking_id": line[4],
"veh_type_id" : line[1]
})
# Create Data Frame
def toTable(arr, col_names):
output = pd.DataFrame(arr)
if output.empty==False:
output=output[col_names]
return(output)
parked = toTable(parked,['time','parking_id', 'veh_type_id'])
exited = toTable(exited,['time','parking_id', 'veh_type_id'])
# Group by Time
def timeResample(df, var_type):
df.time = pd.DatetimeIndex(df.time)
df.time = df.time.apply(ceilTime)
if var_type not in df:
df[var_type] = 1
return(df.groupby(['time', 'parking_id', 'veh_type_id']).sum())
parkedTmp = timeResample(parked, "spotParked")
exitedTmp = timeResample(exited, "spotExited")
# Get cumulative parked and exited counts to find out the # of parking spots occupied at each time
parked = parkedTmp.merge(exitedTmp, left_index=True, right_index=True, how="outer")
parked.fillna(0, inplace=True)
##############################
## LOAD PARKING CAPACITIES
##############################
# Initialize Database
dbConn = psycopg2.connect("dbname='" + options.dbname + "' " + \
"user='" + options.dbuser + "' " + \
"host='" + options.dbhost + "' " + \
"password='" + options.dbpwd + "'")
cur = dbConn.cursor()
# Execute Query
cols = {'parking_id' : 'parking_id',
'f1.veh_type_id' : 'veh_type_id',
'start_time' : 'start_time',
'end_time' : 'end_time',
'capacity_pcu' : 'capacity_pcu',
'pcu' : 'pcu' }
query = "SELECT " + ", ".join(cols.keys()) + " FROM " + options.park + "('00:00:00','23:59:59') f1 JOIN " + \
options.veht + " f2 ON f1.veh_type_id = f2.veh_type_id"
cur.execute(query)
dbConn.commit()
# Create time slots
time_slots = pd.date_range(start=ceilTime(timeLimits[0]),
                           end=ceilTime(timeLimits[1]), freq='%dT' % options.g)
time_slots = pd.DataFrame(time_slots)
time_slots.columns = ['time']
time_slots['key'] = 0
# Get Parking ID X Vehicle Type Data Frame
park_slots = pd.DataFrame(cur.fetchall())
park_slots.columns = cols.values()
park_slots.veh_type_id = [ str(v) for v in park_slots.veh_type_id ]
park_slots.start_time = pd.DatetimeIndex(pd.to_datetime(park_slots.start_time, format="%H:%M:%S"))
park_slots.end_time = pd.DatetimeIndex( | pd.to_datetime(park_slots.end_time, format="%H:%M:%S") | pandas.to_datetime |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/10/10 13:46
Desc: 东方财富网-数据中心-COMEX库存数据
http://data.eastmoney.com/pmetal/comex/by.html
"""
import demjson
import pandas as pd
import requests
def futures_comex_inventory(symbol: str = "黄金") -> pd.DataFrame:
"""
    Eastmoney (东方财富网) Data Center - COMEX inventory data
    http://data.eastmoney.com/pmetal/comex/by.html
    :param symbol: choice of {"黄金" (gold), "白银" (silver)}
    :type symbol: str
    :return: COMEX inventory data
:rtype: pandas.DataFrame
"""
symbol_map = {
"黄金": "(ID='EMI00069026')",
"白银": "(ID='EMI00069027')",
}
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "HJBY_KC",
"token": "<PASSWORD>",
"p": "1",
"ps": "5000",
"st": "DATADATE",
"sr": "-1",
"filter": symbol_map[symbol],
"js": "var hVtWMLwm={pages:(tp),data:(x)}",
"rt": "53367096",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{"):])
temp_df = | pd.DataFrame(data_json["data"]) | pandas.DataFrame |
#!/usr/bin/env python
import os
from collections import defaultdict
import pandas as pd
import click
import numpy as np
from scipy.signal import argrelmax
from HotGauge.thermal.ICE import load_3DICE_grid_file
from HotGauge.utils.io import open_file_or_stdout
################################################################################
############################## Analysis Functions ##############################
################################################################################
def compute_MLTDs(thermal_grid, xy_location, region_offsets):
x_idx, y_idx = xy_location
t_center = thermal_grid[x_idx, y_idx]
region = ((x_idx + x_off, y_idx + y_off) for x_off, y_off in region_offsets)
tmin_in_radius, tmax_in_radius = _range_within_region(thermal_grid, region)
pos_MLTD = tmax_in_radius - t_center
neg_MLTD = t_center - tmin_in_radius
return neg_MLTD, pos_MLTD
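# Example (editorial): if the pixel at `xy_location` is 80 C and the hottest and coolest
# pixels inside the circular region are 85 C and 78 C, then pos_MLTD = 85 - 80 = 5 and
# neg_MLTD = 80 - 78 = 2, i.e. the maximum lateral temperature differences in each direction.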
# TODO: add gradient calculations back in?
# Possibly optionally (since they are computationally expensive and not currently used)
def characterize_maxima(thermal_grid, pixel_radius, in_both_dimensions=True, as_df=True):
    # First find candidate local maxima, either jointly in both dimensions or in either single dimension
    if in_both_dimensions:
candidates = _local_max_indices_2D(thermal_grid)
else:
candidates = _local_max_indices_1D(thermal_grid)
circle_offsets = list(_circle_region_offsets(pixel_radius))
data = defaultdict(list)
for xy_location in candidates:
neg_MLTD, pos_MLTD = compute_MLTDs(thermal_grid, xy_location, circle_offsets)
x_idx, y_idx = xy_location
data['x_idx'].append(x_idx)
data['y_idx'].append(y_idx)
data['temp_xy'].append(thermal_grid[x_idx, y_idx])
data['neg_MLTD'].append(neg_MLTD)
data['pos_MLTD'].append(pos_MLTD)
if as_df:
return _local_max_stats_dict_to_df(data)
return data
def characterize_maxima_from_trace(thermal_trace, pixel_radius, in_both_dimensions=True, as_df=True):
all_data = defaultdict(list)
for time_step, thermal_grid in enumerate(thermal_trace):
data = characterize_maxima(thermal_grid, pixel_radius, in_both_dimensions, as_df=False)
data['time_step'] = [time_step] * len(data['x_idx'])
for k, v in data.items():
all_data[k].extend(v)
if as_df:
return _local_max_stats_dict_to_df(all_data)
return all_data
def local_max_stats_df(ice_grid_output, mltd_radius_px, in_both_dimensions=True):
    return _local_max_stats_fn(ice_grid_output, mltd_radius_px, True, in_both_dimensions=in_both_dimensions)
def local_max_stats_dict(ice_grid_output, mltd_radius_px, in_both_dimensions=True):
    return _local_max_stats_fn(ice_grid_output, mltd_radius_px, False, in_both_dimensions=in_both_dimensions)
def _local_max_stats_fn(ice_grid_output, mltd_radius_px, as_df, in_both_dimensions=True):
t_trace = load_3DICE_grid_file(ice_grid_output)
maxima_data = characterize_maxima_from_trace(t_trace, mltd_radius_px,
in_both_dimensions=in_both_dimensions, as_df=False)
if as_df:
return _local_max_stats_dict_to_df(maxima_data)
return maxima_data
def _local_max_stats_dict_to_df(maxima_data):
df = pd.DataFrame(maxima_data)
df.x_idx = df.x_idx.astype(int)
df.y_idx = df.y_idx.astype(int)
df.time_step = df.time_step.astype(int)
return df
def local_max_stats_to_file(local_max_stats_df, output_file=None):
with open_file_or_stdout(output_file) as f:
columns = ['time_step', 'x_idx', 'y_idx', 'temp_xy', 'pos_MLTD', 'neg_MLTD']
line_frmt = '\t'.join(['{}'] * len(columns)) + '\n'
f.write(line_frmt.format(*columns))
for _, row in local_max_stats_df.astype('O').iterrows():
values = [row[col] for col in columns]
f.write(line_frmt.format(*values))
def local_max_stats_from_file(local_max_stats_file):
def _load_pkl():
return | pd.read_pickle(local_max_stats_file) | pandas.read_pickle |
from mbf_genomics.annotator import Annotator, FromFile
import pandas as pd
class Description(Annotator):
"""Add the description for the genes from genome.
@genome may be None (default), then the ddf is queried for a '.genome'
Requires a genome with df_genes_meta - e.g. EnsemblGenomes
"""
columns = ["description"]
def __init__(self, genome=None):
self.genome = genome
def calc_ddf(self, ddf):
if self.genome is None:
try:
genome = ddf.genome
except AttributeError:
raise AttributeError(
"ddf had no .genome and no genome was passed to Description"
)
else:
genome = self.genome
lookup = dict(genome.df_genes_meta["description"].items())
result = []
for gene_stable_id in ddf.df["gene_stable_id"]:
result.append(lookup.get(gene_stable_id, ""))
return | pd.Series(result, index=ddf.df.index) | pandas.Series |
import asyncio
from collections import defaultdict, namedtuple
from dataclasses import dataclass, fields as dataclass_fields
from datetime import date, datetime, timedelta, timezone
from enum import Enum
from itertools import chain, repeat
import logging
import pickle
from typing import Collection, Dict, Generator, Iterable, Iterator, KeysView, List, \
Mapping, Optional, Sequence, Set, Tuple, Union
import aiomcache
import numpy as np
import pandas as pd
from pandas.core.common import flatten
from sqlalchemy import sql
from sqlalchemy.orm import aliased
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import BinaryExpression
from athenian.api import metadata
from athenian.api.async_utils import gather, read_sql_query
from athenian.api.cache import cached, CancelCache, short_term_exptime
from athenian.api.controllers.logical_repos import coerce_logical_repos
from athenian.api.controllers.miners.filters import JIRAFilter, LabelFilter
from athenian.api.controllers.miners.github.commit import BRANCH_FETCH_COMMITS_COLUMNS, \
DAG, fetch_precomputed_commit_history_dags, fetch_repository_commits_no_branch_dates
from athenian.api.controllers.miners.github.dag_accelerated import searchsorted_inrange
from athenian.api.controllers.miners.github.label import fetch_labels_to_filter
from athenian.api.controllers.miners.github.logical import split_logical_repositories
from athenian.api.controllers.miners.github.precomputed_prs import \
discover_inactive_merged_unreleased_prs, MergedPRFactsLoader, OpenPRFactsLoader, \
update_unreleased_prs
from athenian.api.controllers.miners.github.release_load import ReleaseLoader
from athenian.api.controllers.miners.github.release_match import PullRequestToReleaseMapper, \
ReleaseToPullRequestMapper
from athenian.api.controllers.miners.github.released_pr import matched_by_column
from athenian.api.controllers.miners.jira.issue import generate_jira_prs_query
from athenian.api.controllers.miners.types import DeploymentConclusion, MinedPullRequest, \
nonemax, nonemin, PRParticipants, PRParticipationKind, PullRequestFacts, PullRequestFactsMap
from athenian.api.controllers.prefixer import Prefixer
from athenian.api.controllers.settings import LogicalRepositorySettings, ReleaseMatch, \
ReleaseSettings
from athenian.api.db import add_pdb_misses, Database, DatabaseLike
from athenian.api.defer import AllEvents, defer
from athenian.api.int_to_str import int_to_str
from athenian.api.models.metadata.github import Base, NodePullRequestJiraIssues, \
PullRequest, PullRequestComment, PullRequestCommit, PullRequestLabel, PullRequestReview, \
PullRequestReviewComment, PullRequestReviewRequest, PushCommit, Release
from athenian.api.models.metadata.jira import Component, Issue
from athenian.api.models.persistentdata.models import DeploymentNotification
from athenian.api.models.precomputed.models import GitHubPullRequestDeployment
from athenian.api.tracing import sentry_span
@dataclass
class PRDataFrames(Mapping[str, pd.DataFrame]):
"""Set of dataframes with all the PR data we can reach."""
prs: pd.DataFrame
commits: pd.DataFrame
releases: pd.DataFrame
jiras: pd.DataFrame
reviews: pd.DataFrame
review_comments: pd.DataFrame
review_requests: pd.DataFrame
comments: pd.DataFrame
labels: pd.DataFrame
deployments: pd.DataFrame
def __iter__(self) -> Iterator[str]:
"""Implement iter() - return an iterator over the field names."""
return iter((f.name for f in dataclass_fields(self)))
def __getitem__(self, key: str) -> pd.DataFrame:
"""Implement self[key]."""
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __setitem__(self, key: str, value: pd.DataFrame) -> None:
"""Implement self[key] = value."""
for f in dataclass_fields(self):
if key == f.name:
break
else:
raise KeyError(key)
setattr(self, key, value)
def __len__(self) -> int:
"""Implement len()."""
return len(dataclass_fields(self))
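# Editorial usage sketch (not part of the original module): PRDataFrames implements the
# Mapping protocol over its dataframe fields, so generic code can iterate the frames
# without naming each one, e.g.
#
#   non_empty = {name: df for name, df in dfs.items() if not df.empty}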
class PullRequestMiner:
"""Load all the information related to Pull Requests from the metadata DB. Iterate over it \
to access individual PR objects."""
CACHE_TTL = short_term_exptime
log = logging.getLogger("%s.PullRequestMiner" % metadata.__package__)
ReleaseMappers = namedtuple("ReleaseMappers", [
"map_releases_to_prs", "map_prs_to_releases", "load_releases"])
mappers = ReleaseMappers(
map_releases_to_prs=ReleaseToPullRequestMapper.map_releases_to_prs,
map_prs_to_releases=PullRequestToReleaseMapper.map_prs_to_releases,
load_releases=ReleaseLoader.load_releases,
)
def __init__(self, dfs: PRDataFrames):
"""Initialize a new instance of `PullRequestMiner`."""
self._dfs = dfs
@property
def dfs(self) -> PRDataFrames:
"""Return the bound dataframes with PR information."""
return self._dfs
def __len__(self) -> int:
"""Return the number of loaded pull requests."""
return len(self._dfs.prs)
def __iter__(self) -> Generator[MinedPullRequest, None, None]:
"""Iterate over the individual pull requests."""
assert self._dfs.prs.index.nlevels == 2
df_fields = [f.name for f in dataclass_fields(MinedPullRequest) if f.name != "pr"]
dfs = []
grouped_df_iters = []
index_backup = []
for k in df_fields:
plural = k.endswith("s")
df = getattr(self._dfs, k if plural else (k + "s")) # type: pd.DataFrame
dfs.append(df)
# our very own groupby() allows us to call take() with reduced overhead
node_ids = df.index.get_level_values(0).values.astype(int, copy=False)
with_repos = k == "release"
if df.index.nlevels > 1:
# the second level adds determinism to the iteration order
second_level = df.index.get_level_values(1).values
node_ids_bytes = int_to_str(node_ids)
if second_level.dtype == int:
order_keys = np.char.add(node_ids_bytes, int_to_str(second_level))
else:
order_keys = np.char.add(node_ids_bytes,
second_level.astype("S", copy=False))
else:
order_keys = node_ids
df_order = np.argsort(order_keys)
if not with_repos:
unique_node_ids, node_ids_unique_counts = np.unique(node_ids, return_counts=True)
offsets = np.zeros(len(node_ids_unique_counts) + 1, dtype=int)
np.cumsum(node_ids_unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(unique_node_ids, repeat(None), groups)))
else:
_, unique_counts = np.unique(order_keys, return_counts=True)
node_ids = node_ids[df_order]
repos = df.index.get_level_values(1).values[df_order].astype("U")
offsets = np.zeros(len(unique_counts) + 1, dtype=int)
np.cumsum(unique_counts, out=offsets[1:])
groups = self._iter_by_split(df_order, offsets)
grouped_df_iters.append(iter(zip(
node_ids[offsets[:-1]], repos[offsets[:-1]], groups)))
if plural:
index_backup.append(df.index)
df.index = df.index.droplevel(0)
else:
index_backup.append(None)
try:
grouped_df_states = []
for i in grouped_df_iters:
try:
grouped_df_states.append(next(i))
except StopIteration:
grouped_df_states.append((None, None, None))
empty_df_cache = {}
pr_columns = [PullRequest.node_id.name, PullRequest.repository_full_name.name]
pr_columns.extend(self._dfs.prs.columns)
if not self._dfs.prs.index.is_monotonic_increasing:
raise IndexError("PRs index must be pre-sorted ascending: "
"prs.sort_index(inplace=True)")
for pr_tuple in self._dfs.prs.itertuples():
(pr_node_id, repo), *pr_tuple = pr_tuple
items = {"pr": dict(zip(pr_columns, [pr_node_id, repo] + pr_tuple))}
for i, (k, (state_pr_node_id, state_repo, gdf), git, df) in enumerate(zip(
df_fields, grouped_df_states, grouped_df_iters, dfs)):
while state_pr_node_id is not None and (
state_pr_node_id < pr_node_id
or (state_pr_node_id == pr_node_id
and state_repo is not None
and state_repo < repo)):
try:
state_pr_node_id, state_repo, gdf = next(git)
except StopIteration:
state_pr_node_id, state_repo, gdf = None, None, None
grouped_df_states[i] = state_pr_node_id, state_repo, gdf
if state_pr_node_id == pr_node_id and \
(state_repo is None or state_repo == repo):
if not k.endswith("s"):
# much faster than items.iloc[gdf[0]]
gdf = {c: v for c, v in zip(df.columns, df._data.fast_xs(gdf[0]))}
else:
gdf = df.take(gdf)
items[k] = gdf
else:
try:
items[k] = empty_df_cache[k]
except KeyError:
if k.endswith("s"):
empty_val = df.iloc[:0].copy()
else:
empty_val = {c: None for c in df.columns}
items[k] = empty_df_cache[k] = empty_val
yield MinedPullRequest(**items)
finally:
for df, index in zip(dfs, index_backup):
if index is not None:
df.index = index
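    # Editorial note on the ordering trick in __iter__ above: each child dataframe is
    # sorted by a composite byte-string key (the PR node id rendered via int_to_str,
    # concatenated with the second index level), so all rows belonging to one PR form a
    # contiguous slice that is consumed in lock-step with the pre-sorted `prs` frame.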
def drop(self, node_ids: Collection[int]) -> pd.Index:
"""
Remove PRs from the given collection of PR node IDs in-place.
Node IDs don't have to be all present.
:return: Actually removed node IDs.
"""
removed = self._dfs.prs.index.get_level_values(0).intersection(node_ids)
if removed.empty:
return removed
self._dfs.prs.drop(removed, inplace=True)
for df in self._dfs.values():
df.drop(removed, inplace=True, errors="ignore",
level=0 if isinstance(df.index, pd.MultiIndex) else None)
return removed
def _deserialize_mine_cache(buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
Dict[str, ReleaseMatch],
asyncio.Event]:
stuff = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return (*stuff, event)
@sentry_span
def _postprocess_cached_prs(
result: Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event],
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
**_) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
dfs, _, cached_repositories, cached_participants, cached_labels, cached_jira, \
cached_with_jira_map, _, _ = result
if with_jira_map and not cached_with_jira_map:
raise CancelCache()
cls = PullRequestMiner
if (repositories - cached_repositories or
not cls._check_participants_compatibility(cached_participants, participants) or
not cached_labels.compatible_with(labels) or
not cached_jira.compatible_with(jira)):
raise CancelCache()
to_remove = set()
if pr_blacklist is not None:
to_remove.update(pr_blacklist[0])
if no_logical_repos := (coerce_logical_repos(repositories).keys() == repositories):
to_remove.update(dfs.prs.index.get_level_values(0).values[
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True, invert=True),
])
time_to = None if truncate else pd.Timestamp(date_to, tzinfo=timezone.utc)
to_remove.update(cls._find_drop_by_participants(dfs, participants, time_to))
to_remove.update(cls._find_drop_by_labels(dfs, labels))
to_remove.update(cls._find_drop_by_jira(dfs, jira))
cls._drop(dfs, to_remove)
if not no_logical_repos:
dfs.prs = dfs.prs.take(np.flatnonzero(
np.in1d(dfs.prs.index.get_level_values(1).values,
list(repositories), assume_unique=True),
))
return result
@classmethod
@sentry_span
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_cache,
key=lambda date_from, date_to, exclude_inactive, release_settings, logical_settings, updated_min, updated_max, pr_blacklist, truncate, **_: ( # noqa
date_from.toordinal(), date_to.toordinal(), exclude_inactive,
release_settings, logical_settings,
updated_min.timestamp() if updated_min is not None else None,
updated_max.timestamp() if updated_max is not None else None,
",".join(map(str, sorted(pr_blacklist[0]) if pr_blacklist is not None else [])),
truncate,
),
postprocess=_postprocess_cached_prs,
)
async def _mine(cls,
date_from: date,
date_to: date,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
updated_min: Optional[datetime],
updated_max: Optional[datetime],
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]],
truncate: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
Set[str],
PRParticipants,
LabelFilter,
JIRAFilter,
bool,
Dict[str, ReleaseMatch],
asyncio.Event]:
assert isinstance(date_from, date) and not isinstance(date_from, datetime)
assert isinstance(date_to, date) and not isinstance(date_to, datetime)
assert isinstance(repositories, set)
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
assert isinstance(rdb, Database)
assert (updated_min is None) == (updated_max is None)
time_from, time_to = (pd.Timestamp(t, tzinfo=timezone.utc) for t in (date_from, date_to))
pr_blacklist_expr = ambiguous = None
if pr_blacklist is not None:
pr_blacklist, ambiguous = pr_blacklist
if len(pr_blacklist) > 0:
pr_blacklist_expr = PullRequest.node_id.notin_any_values(pr_blacklist)
if logical_settings.has_logical_prs():
physical_repos = coerce_logical_repos(repositories).keys()
else:
physical_repos = repositories
pdags = await fetch_precomputed_commit_history_dags(physical_repos, account, pdb, cache)
fetch_branch_dags_task = asyncio.create_task(
cls._fetch_branch_dags(
physical_repos, pdags, branches, account, meta_ids, mdb, pdb, cache),
name="_fetch_branch_dags",
)
# the heaviest task should always go first
tasks = [
cls.mappers.map_releases_to_prs(
repositories, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, pr_blacklist_expr, None,
truncate=truncate),
cls.fetch_prs(
time_from, time_to, physical_repos, participants, labels, jira,
exclude_inactive, pr_blacklist_expr, None, branches, pdags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
cls.map_deployments_to_prs(
physical_repos, time_from, time_to, participants,
labels, jira, updated_min, updated_max, prefixer, branches, pdags,
account, meta_ids, mdb, pdb, cache, pr_blacklist,
fetch_branch_dags_task=fetch_branch_dags_task),
]
# the following is a very rough approximation regarding updated_min/max:
        # we load all or none of the inactive merged PRs
# see also: load_precomputed_done_candidates() which generates `ambiguous`
if not exclude_inactive and (updated_min is None or updated_min <= time_from):
tasks.append(cls._fetch_inactive_merged_unreleased_prs(
time_from, time_to, repositories, participants, labels, jira, default_branches,
release_settings, logical_settings.has_logical_prs(),
prefixer, account, meta_ids, mdb, pdb, cache))
# we don't load inactive released undeployed PRs because nobody needs them
(
(released_prs, releases, release_settings, matched_bys,
release_dags, precomputed_observed),
(prs, branch_dags, _),
deployed_prs,
*unreleased,
) = await gather(*tasks)
del pr_blacklist_expr
deployed_releases_task = None
if not deployed_prs.empty:
covered_prs = prs.index.union(released_prs.index)
if unreleased:
covered_prs = covered_prs.union(unreleased[0].index)
new_prs = deployed_prs.index.difference(covered_prs)
if not new_prs.empty:
new_prs = deployed_prs[[
PullRequest.merged_at.name, PullRequest.repository_full_name.name,
]].loc[new_prs]
min_deployed_merged = new_prs[PullRequest.merged_at.name].min()
if min_deployed_merged < time_from:
deployed_releases_task = asyncio.create_task(
cls.mappers.load_releases(
new_prs[PullRequest.repository_full_name.name].unique(),
branches, default_branches, min_deployed_merged, time_from,
release_settings, logical_settings, prefixer, account, meta_ids,
mdb, pdb, rdb, cache),
name="PullRequestMiner.mine/deployed_releases",
)
concatenated = [prs, released_prs, deployed_prs, *unreleased]
missed_prs = cls._extract_missed_prs(ambiguous, pr_blacklist, deployed_prs, matched_bys)
if missed_prs:
add_pdb_misses(pdb, "PullRequestMiner.mine/blacklist",
sum(len(v) for v in missed_prs.values()))
# these PRs are released by branch and not by tag, and we require by tag.
# we have not fetched them yet because they are in pr_blacklist
# and they are in pr_blacklist because we have previously loaded them in
# load_precomputed_done_candidates();
# now fetch only these `missed_prs`, respecting the filters.
pr_whitelist = PullRequest.node_id.in_(
list(chain.from_iterable(missed_prs.values())))
tasks = [
cls.mappers.map_releases_to_prs(
missed_prs, branches, default_branches, time_from, time_to,
participants.get(PRParticipationKind.AUTHOR, []),
participants.get(PRParticipationKind.MERGER, []),
jira, release_settings, logical_settings, updated_min, updated_max, pdags,
prefixer, account, meta_ids, mdb, pdb, rdb, cache, None, pr_whitelist,
truncate, precomputed_observed=precomputed_observed),
cls.fetch_prs(
time_from, time_to, missed_prs.keys(), participants, labels, jira,
exclude_inactive, None, pr_whitelist, branches, branch_dags, account, meta_ids,
mdb, pdb, cache, updated_min=updated_min, updated_max=updated_max,
fetch_branch_dags_task=fetch_branch_dags_task),
]
missed_released_prs, (missed_prs, *_) = await gather(*tasks)
concatenated.extend([missed_released_prs, missed_prs])
fetch_branch_dags_task.cancel() # 99.999% that it was awaited, but still
prs = pd.concat(concatenated, copy=False)
prs.reset_index(inplace=True)
prs.drop_duplicates([PullRequest.node_id.name, PullRequest.repository_full_name.name],
inplace=True)
prs.set_index(PullRequest.node_id.name, inplace=True)
prs.sort_index(inplace=True)
if unreleased:
unreleased = np.array([
unreleased[0].index.values,
unreleased[0][PullRequest.repository_full_name.name].values,
], dtype=object).T
tasks = [
# bypass the useless inner caching by calling _mine_by_ids directly
cls._mine_by_ids(
prs, unreleased, repositories, time_to, releases, matched_bys,
branches, default_branches, release_dags, release_settings, logical_settings,
prefixer, account, meta_ids, mdb, pdb, rdb, cache,
truncate=truncate, with_jira=with_jira_map,
extra_releases_task=deployed_releases_task,
physical_repositories=physical_repos),
OpenPRFactsLoader.load_open_pull_request_facts(prs, repositories, account, pdb),
]
(dfs, unreleased_facts, unreleased_prs_event), open_facts = await gather(
*tasks, op="PullRequestMiner.mine/external_data")
to_drop = cls._find_drop_by_participants(dfs, participants, None if truncate else time_to)
to_drop |= cls._find_drop_by_labels(dfs, labels)
if exclude_inactive:
to_drop |= cls._find_drop_by_inactive(dfs, time_from, time_to)
cls._drop(dfs, to_drop)
facts = open_facts
for k, v in unreleased_facts.items(): # merged unreleased PR precomputed facts
if v is not None: # it can be None because the pdb table is filled in two steps
facts[k] = v
dfs.prs = split_logical_repositories(
dfs.prs, dfs.labels, repositories, logical_settings)
return dfs, facts, repositories, participants, labels, jira, with_jira_map, matched_bys, \
unreleased_prs_event
_deserialize_mine_cache = staticmethod(_deserialize_mine_cache)
_postprocess_cached_prs = staticmethod(_postprocess_cached_prs)
def _deserialize_mine_by_ids_cache(
buffer: bytes) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
dfs, facts = pickle.loads(buffer)
event = asyncio.Event()
event.set()
return dfs, facts, event
@classmethod
@cached(
exptime=lambda cls, **_: cls.CACHE_TTL,
serialize=lambda r: pickle.dumps(r[:-1]),
deserialize=_deserialize_mine_by_ids_cache,
key=lambda prs, unreleased, releases, time_to, logical_settings, truncate=True, with_jira=True, **_: ( # noqa
",".join(map(str, prs.index.values)),
",".join(map(str, unreleased)),
",".join(map(str, releases[Release.node_id.name].values)),
time_to.timestamp(),
logical_settings,
truncate,
with_jira,
),
)
async def mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
"""
Fetch PR metadata for certain PRs.
:param prs: pandas DataFrame with fetched PullRequest-s. Only the details about those PRs \
will be loaded from the DB.
:param truncate: Do not load anything after `time_to`.
:param with_jira: Value indicating whether to load the mapped JIRA issues.
:return: 1. List of mined DataFrame-s. \
2. mapping to PullRequestFacts of unreleased merged PRs. \
3. Synchronization for updating the pdb table with merged unreleased PRs.
"""
return await cls._mine_by_ids(
prs, unreleased, logical_repositories, time_to, releases, matched_bys,
branches, default_branches, dags, release_settings, logical_settings, prefixer,
account, meta_ids, mdb, pdb, rdb, cache, truncate=truncate, with_jira=with_jira,
physical_repositories=physical_repositories)
_deserialize_mine_by_ids_cache = staticmethod(_deserialize_mine_by_ids_cache)
@classmethod
@sentry_span
async def _mine_by_ids(cls,
prs: pd.DataFrame,
unreleased: Collection[Tuple[int, str]],
logical_repositories: Union[Set[str], KeysView[str]],
time_to: datetime,
releases: pd.DataFrame,
matched_bys: Dict[str, ReleaseMatch],
branches: pd.DataFrame,
default_branches: Dict[str, str],
dags: Dict[str, DAG],
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
truncate: bool = True,
with_jira: bool = True,
extra_releases_task: Optional[asyncio.Task] = None,
physical_repositories: Optional[Union[Set[str], KeysView[str]]] = None,
) -> Tuple[PRDataFrames,
PullRequestFactsMap,
asyncio.Event]:
assert prs.index.nlevels == 1
node_ids = prs.index if len(prs) > 0 else set()
facts = {} # precomputed PullRequestFacts about merged unreleased PRs
unreleased_prs_event: asyncio.Event = None
merged_unreleased_indexes = []
@sentry_span
async def fetch_reviews():
return await cls._read_filtered_models(
PullRequestReview, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReview.submitted_at, PullRequestReview.state,
PullRequestReview.user_login, PullRequestReview.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_comments():
return await cls._read_filtered_models(
PullRequestReviewComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewComment.created_at, PullRequestReviewComment.user_login,
PullRequestReviewComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_review_requests():
return await cls._read_filtered_models(
PullRequestReviewRequest, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestReviewRequest.created_at],
created_at=truncate)
@sentry_span
async def fetch_comments():
return await cls._read_filtered_models(
PullRequestComment, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestComment.created_at, PullRequestComment.user_login,
PullRequestComment.user_node_id],
created_at=truncate)
@sentry_span
async def fetch_commits():
return await cls._read_filtered_models(
PullRequestCommit, node_ids, time_to, meta_ids, mdb,
columns=[PullRequestCommit.authored_date, PullRequestCommit.committed_date,
PullRequestCommit.author_login, PullRequestCommit.committer_login,
PullRequestCommit.author_user_id, PullRequestCommit.committer_user_id],
created_at=truncate)
@sentry_span
async def fetch_labels():
return await cls._read_filtered_models(
PullRequestLabel, node_ids, time_to, meta_ids, mdb,
columns=[sql.func.lower(PullRequestLabel.name).label(PullRequestLabel.name.name),
PullRequestLabel.description,
PullRequestLabel.color],
created_at=False)
fetch_labels_task = asyncio.create_task(
fetch_labels(), name="PullRequestMiner.mine_by_ids/fetch_labels")
@sentry_span
async def map_releases():
anyhow_merged_mask = prs[PullRequest.merged_at.name].notnull().values
if truncate:
merged_mask = (prs[PullRequest.merged_at.name] < time_to).values
nonlocal merged_unreleased_indexes
merged_unreleased_indexes = np.flatnonzero(anyhow_merged_mask & ~merged_mask)
else:
merged_mask = anyhow_merged_mask
if len(unreleased):
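                # build composite "node ID + repository name" keys so that PRs already known
                # to be merged but unreleased are excluded from the release mapping below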
prs_index = np.char.add(
int_to_str(prs.index.values),
(prs_repos := prs[PullRequest.repository_full_name.name].values.astype("S")),
)
if isinstance(unreleased, np.ndarray):
unreleased_index = np.char.add(
int_to_str(unreleased[:, 0].astype(int)),
unreleased[:, 1].astype(prs_repos.dtype),
)
else:
unreleased_index = np.char.add(
int_to_str(np.fromiter((p[0] for p in unreleased), int, len(unreleased))),
np.array([p[1] for p in unreleased], dtype=prs_repos.dtype),
)
merged_mask &= np.in1d(prs_index, unreleased_index, invert=True)
merged_prs = prs.take(np.flatnonzero(merged_mask))
nonlocal releases
if extra_releases_task is not None:
await extra_releases_task
extra_releases, _ = extra_releases_task.result()
releases = releases.append(extra_releases, ignore_index=True)
labels = None
if logical_settings.has_logical_prs():
nonlocal physical_repositories
if physical_repositories is None:
physical_repositories = coerce_logical_repos(logical_repositories).keys()
if logical_settings.has_prs_by_label(physical_repositories):
await fetch_labels_task
labels = fetch_labels_task.result()
merged_prs = split_logical_repositories(
merged_prs, labels, logical_repositories, logical_settings)
else:
merged_prs = split_logical_repositories(merged_prs, None, set(), logical_settings)
df_facts, other_facts = await gather(
cls.mappers.map_prs_to_releases(
merged_prs, releases, matched_bys, branches, default_branches, time_to,
dags, release_settings, prefixer, account, meta_ids, mdb, pdb, cache,
labels=labels),
MergedPRFactsLoader.load_merged_unreleased_pull_request_facts(
prs.take(np.flatnonzero(anyhow_merged_mask & ~merged_mask)),
nonemax(releases[Release.published_at.name].nonemax(), time_to),
LabelFilter.empty(), matched_bys, default_branches, release_settings,
prefixer, account, pdb),
)
nonlocal facts
nonlocal unreleased_prs_event
df, facts, unreleased_prs_event = df_facts
facts.update(other_facts)
return df
async def _fetch_labels():
await fetch_labels_task
return fetch_labels_task.result()
@sentry_span
async def fetch_jira():
_map = aliased(NodePullRequestJiraIssues, name="m")
_issue = aliased(Issue, name="i")
_issue_epic = aliased(Issue, name="e")
selected = [
PullRequest.node_id, _issue.key, _issue.title, _issue.type, _issue.status,
_issue.created, _issue.updated, _issue.resolved, _issue.labels, _issue.components,
_issue.acc_id, _issue_epic.key.label("epic"),
]
if not with_jira:
df = pd.DataFrame(columns=[col.name for col in selected
if col not in (_issue.acc_id, _issue.components)])
df[PullRequest.node_id.name] = df[PullRequest.node_id.name].astype(int)
return df.set_index([PullRequest.node_id.name, _issue.key.name])
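            # join PRs -> PR/JIRA mapping -> JIRA issues, with an outer join to the epic issue
            # so that issues without an epic are still returned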
df = await read_sql_query(
sql.select(selected).select_from(sql.join(
PullRequest, sql.join(
_map, sql.join(_issue, _issue_epic, sql.and_(
_issue.epic_id == _issue_epic.id,
_issue.acc_id == _issue_epic.acc_id), isouter=True),
sql.and_(_map.jira_id == _issue.id,
_map.jira_acc == _issue.acc_id)),
sql.and_(PullRequest.node_id == _map.node_id,
PullRequest.acc_id == _map.node_acc),
)).where(sql.and_(PullRequest.node_id.in_(node_ids),
PullRequest.acc_id.in_(meta_ids),
_issue.is_deleted.is_(False))),
mdb, columns=selected, index=[PullRequest.node_id.name, _issue.key.name])
if df.empty:
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
components = df[[Issue.acc_id.name, Issue.components.name]] \
.groupby(Issue.acc_id.name, sort=False).aggregate(lambda s: set(flatten(s)))
rows = await mdb.fetch_all(
sql.select([Component.acc_id, Component.id, Component.name])
.where(sql.or_(*(sql.and_(Component.id.in_(vals),
Component.acc_id == int(acc))
for acc, vals in zip(components.index.values,
components[Issue.components.name].values)))))
cmap = {}
for r in rows:
cmap.setdefault(r[0], {})[r[1]] = r[2].lower()
df[Issue.labels.name] = (
df[Issue.labels.name].apply(lambda i: [s.lower() for s in (i or [])])
+
df[[Issue.acc_id.name, Issue.components.name]]
.apply(lambda row: ([cmap[row[Issue.acc_id.name]][c]
for c in row[Issue.components.name]]
if row[Issue.components.name] is not None else []),
axis=1)
)
df.drop([Issue.acc_id.name, Issue.components.name], inplace=True, axis=1)
return df
# the order is important: it provides the best performance
# we launch coroutines from the heaviest to the lightest
dfs = await gather(
fetch_commits(),
map_releases(),
fetch_jira(),
fetch_reviews(),
fetch_review_comments(),
fetch_review_requests(),
fetch_comments(),
_fetch_labels(),
cls.fetch_pr_deployments(node_ids, prefixer, account, pdb, rdb),
)
dfs = PRDataFrames(prs, *dfs)
if len(merged_unreleased_indexes):
# if we truncate and there are PRs merged after `time_to`
merged_unreleased_prs = prs.take(merged_unreleased_indexes)
label_matches = np.flatnonzero(np.in1d(
dfs.labels.index.get_level_values(0).values,
merged_unreleased_prs.index.values))
labels = {}
for k, v in zip(dfs.labels.index.get_level_values(0).values[label_matches],
dfs.labels[PullRequestLabel.name.name].values[label_matches]):
try:
labels[k].append(v)
except KeyError:
labels[k] = [v]
other_unreleased_prs_event = asyncio.Event()
unreleased_prs_event = AllEvents(unreleased_prs_event, other_unreleased_prs_event)
merged_unreleased_prs = split_logical_repositories(
merged_unreleased_prs, dfs.labels, logical_repositories, logical_settings)
await defer(update_unreleased_prs(
merged_unreleased_prs, pd.DataFrame(), time_to, labels, matched_bys,
default_branches, release_settings, account, pdb, other_unreleased_prs_event),
"update_unreleased_prs/truncate(%d)" % len(merged_unreleased_indexes))
return dfs, facts, unreleased_prs_event
@classmethod
@sentry_span
async def mine(cls,
date_from: date,
date_to: date,
time_from: datetime,
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]] = None,
truncate: bool = True,
) -> Tuple["PullRequestMiner",
PullRequestFactsMap,
Dict[str, ReleaseMatch],
asyncio.Event]:
"""
Mine metadata about pull requests according to the numerous filters.
:param account: State DB account ID.
:param meta_ids: Metadata (GitHub) account IDs.
:param date_from: Fetch PRs created starting from this date, inclusive.
:param date_to: Fetch PRs created ending with this date, inclusive.
:param time_from: Precise timestamp of since when PR events are allowed to happen.
:param time_to: Precise timestamp of until when PR events are allowed to happen.
:param repositories: PRs must belong to these repositories (prefix excluded).
:param participants: PRs must have these user IDs in the specified participation roles \
(OR aggregation). An empty dict means everybody.
:param labels: PRs must be labeled according to this filter's include & exclude sets.
:param jira: JIRA filters for those PRs that are matched with JIRA issues.
:param with_jira_map: Value indicating whether we must load JIRA issues mapped to PRs. \
This is independent from filtering PRs by `jira`.
:param branches: Preloaded DataFrame with branches in the specified repositories.
:param default_branches: Mapping from repository names to their default branch names.
        :param exclude_inactive: PRs must have at least one event in the given time frame.
:param release_settings: Release match settings of the account.
:param logical_settings: Logical repository settings of the account.
:param updated_min: PRs must have the last update timestamp not older than it.
        :param updated_max: PRs must have the last update timestamp not newer than it.
:param mdb: Metadata db instance.
:param pdb: Precomputed db instance.
:param rdb: Persistentdata db instance.
:param cache: memcached client to cache the collected data.
:param pr_blacklist: completely ignore the existence of these PR node IDs. \
The second tuple element is the ambiguous PRs: released by branch \
while there were no tag releases and the strategy is `tag_or_branch`.
:param truncate: activate the "time machine" and erase everything after `time_to`.
:return: 1. New `PullRequestMiner` with the PRs satisfying to the specified filters. \
2. Precomputed facts about unreleased pull requests. \
This is an optimization which breaks the abstraction a bit. \
3. `matched_bys` - release matches for each repository. \
4. Synchronization for updating the pdb table with merged unreleased PRs. \
Another abstraction leakage that we have to deal with.
"""
date_from_with_time = datetime.combine(date_from, datetime.min.time(), tzinfo=timezone.utc)
date_to_with_time = datetime.combine(date_to, datetime.min.time(), tzinfo=timezone.utc)
assert time_from >= date_from_with_time
assert time_to <= date_to_with_time
dfs, facts, _, _, _, _, _, matched_bys, event = await cls._mine(
date_from, date_to, repositories, participants, labels, jira, with_jira_map, branches,
default_branches, exclude_inactive, release_settings, logical_settings,
updated_min, updated_max, pr_blacklist, truncate, prefixer, account, meta_ids,
mdb, pdb, rdb, cache)
cls._truncate_prs(dfs, time_from, time_to)
return cls(dfs), facts, matched_bys, event
@classmethod
@sentry_span
async def fetch_prs(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
branches: pd.DataFrame,
dags: Optional[Dict[str, DAG]],
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
fetch_branch_dags_task: Optional[asyncio.Task] = None,
with_labels: bool = False,
) -> Tuple[pd.DataFrame, Dict[str, DAG], Optional[pd.DataFrame]]:
"""
Query pull requests from mdb that satisfy the given filters.
Note: we cannot filter by regular PR labels here due to the DB schema limitations,
so the caller is responsible for fetching PR labels and filtering by them afterward.
Besides, we cannot filter by participation roles different from AUTHOR and MERGER.
Note: we cannot load PRs that closed before time_from but released between
`time_from` and `time_to`. Hence the caller should map_releases_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and released
between `time_from` and `time_to`.
Note: we cannot load PRs that closed before time_from but deployed between
`time_from` and `time_to`. Hence the caller should map_deployments_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and deployed
between `time_from` and `time_to`.
We have to resolve the merge commits of rebased PRs so that they do not appear
force-push-dropped.
:return: pandas DataFrame with the PRs indexed by node_id; \
commit DAGs that contain the branch heads; \
(if was required) DataFrame with PR labels.
"""
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
pr_list_coro = cls._fetch_prs_by_filters(
time_from, time_to, repositories, participants, labels, jira, exclude_inactive,
pr_blacklist, pr_whitelist, meta_ids, mdb, cache, columns=columns,
updated_min=updated_min, updated_max=updated_max,
)
if columns is not PullRequest and PullRequest.merge_commit_id not in columns and \
PullRequest.merge_commit_sha not in columns:
prs, labels = await pr_list_coro
return prs, dags, labels if with_labels else None
if fetch_branch_dags_task is None:
fetch_branch_dags_task = cls._fetch_branch_dags(
repositories, dags, branches, account, meta_ids, mdb, pdb, cache)
dags, (prs, labels) = await gather(fetch_branch_dags_task, pr_list_coro)
async def load_labels():
if not with_labels:
return None
if labels is not None:
return labels
return await fetch_labels_to_filter(prs.index.values, meta_ids, mdb)
prs, labels = await gather(
cls.mark_dead_prs(prs, branches, dags, meta_ids, mdb, columns),
load_labels(),
)
return prs, dags, labels
@classmethod
async def mark_dead_prs(cls,
prs: pd.DataFrame,
branches: pd.DataFrame,
dags: Dict[str, DAG],
meta_ids: Tuple[int, ...],
mdb: Database,
columns=PullRequest,
) -> pd.DataFrame:
"""
Add and fill "dead" column in the `prs` DataFrame.
        A PR is considered dead (force-push-dropped) if it does not exist in the commit DAG and \
we cannot detect its rebased clone.
"""
prs["dead"] = False
if branches.empty:
return prs
merged_prs = prs.take(np.nonzero((
prs[PullRequest.merged_at.name] <= datetime.now(timezone.utc) - timedelta(hours=1)
).values)[0])
# timedelta(hours=1) must match the `exptime` of `fetch_repository_commits()`
# commits DAGs are cached and may be not fully up to date, so otherwise some PRs may
# appear as wrongly force push dropped; see also: DEV-554
if merged_prs.empty:
return prs
pr_numbers = merged_prs[PullRequest.number.name].values
assert merged_prs.index.nlevels == 1
pr_node_ids = merged_prs.index.values
pr_repos = merged_prs[PullRequest.repository_full_name.name].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = \
merged_prs[PullRequest.merge_commit_sha.name].values.astype("S40")[repo_order]
pos = 0
queries = []
dead = []
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
min_commit_date = merged_prs[PullRequest.merged_at.name].min()
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
sqlite = mdb.url.dialect == "sqlite"
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
if len(dag_hashes) == 0:
# no branches found in `fetch_repository_commits()`
continue
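            # searchsorted_inrange() yields candidate positions in the sorted DAG hashes;
            # a mismatch at that position means the merge commit hash is absent from the DAG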
not_found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] != repo_pr_merge_hashes
indexes = repo_order[begin_pos:end_pos][not_found]
dead.extend(dead_node_ids := pr_node_ids[indexes])
repo_cond = PushCommit.repository_full_name == repo
for pr_node_id, n in zip(dead_node_ids, pr_numbers[indexes]):
if sqlite:
# SQLite does not support parameter recycling
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
repo_cond = PushCommit.repository_full_name == repo
queries.append(
sql.select([PushCommit.node_id.label("commit_node_id"),
PushCommit.sha.label("sha"),
sql.literal_column("'" + repo + "'").label("repo"),
sql.literal_column(str(pr_node_id)).label("pr_node_id"),
PushCommit.committed_date,
PushCommit.pushed_date])
.where(sql.and_(acc_id_cond,
repo_cond,
committed_date_cond,
substr.like("Merge pull request #%d from %%" % n))))
if not queries:
return prs
prs.loc[dead, "dead"] = True
# we may have MANY queries here and Postgres responds with StatementTooComplexError
# split them by 100-sized batches to stay below the resource limits
batch_size = 100
tasks = []
for batch_index in range(0, len(queries), batch_size):
batch = queries[batch_index:batch_index + batch_size]
if len(batch) == 1:
query = batch[0]
else:
query = sql.union_all(*batch)
tasks.append(read_sql_query(query, mdb, [
"commit_node_id", "sha", "repo", "pr_node_id",
PushCommit.committed_date, PushCommit.pushed_date,
]))
resolveds = await gather(*tasks, op="mark_dead_prs commit SQL UNION ALL-s")
resolved = pd.concat(resolveds)
# look up the candidates in the DAGs
pr_repos = resolved["repo"].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = resolved["sha"].values.astype("S")[repo_order]
pos = 0
alive_indexes = []
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] == repo_pr_merge_hashes
alive_indexes.extend(repo_order[begin_pos:end_pos][found])
if (resolved := resolved.take(alive_indexes)).empty:
return prs
# take the commit that was committed the latest; if there are multiple, prefer the one
# with pushed_date = null
resolved.sort_values([PushCommit.committed_date.name, PushCommit.pushed_date.name],
ascending=False, inplace=True, na_position="first")
resolved.drop_duplicates("pr_node_id", inplace=True)
# patch the commit IDs and the hashes
alive_node_ids = resolved["pr_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_id in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_id.name] = \
resolved["commit_node_id"].values
if columns is PullRequest or PullRequest.merge_commit_sha in columns:
prs.loc[alive_node_ids, PullRequest.merge_commit_sha.name] = resolved["sha"].values
prs.loc[alive_node_ids, "dead"] = False
return prs
@classmethod
async def _fetch_branch_dags(cls,
repositories: Iterable[str],
dags: Optional[Dict[str, DAG]],
branches: pd.DataFrame,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
) -> Dict[str, DAG]:
if dags is None:
dags = await fetch_precomputed_commit_history_dags(
repositories, account, pdb, cache)
return await fetch_repository_commits_no_branch_dates(
dags, branches, BRANCH_FETCH_COMMITS_COLUMNS, True, account, meta_ids,
mdb, pdb, cache)
@classmethod
@sentry_span
async def _fetch_prs_by_filters(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
meta_ids: Tuple[int, ...],
mdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
assert (updated_min is None) == (updated_max is None)
filters = [
(sql.case(
[(PullRequest.closed, PullRequest.closed_at)],
else_=sql.text("'3000-01-01'"), # backed up with a DB index
) >= time_from) if time_from is not None else sql.true(),
PullRequest.created_at < time_to,
PullRequest.acc_id.in_(meta_ids),
PullRequest.hidden.is_(False),
PullRequest.repository_full_name.in_(repositories),
]
if exclude_inactive and updated_min is None:
# this does not provide 100% guarantee because it can be after time_to,
# we need to properly filter later
filters.append(PullRequest.updated_at >= time_from)
if updated_min is not None:
filters.append(PullRequest.updated_at.between(updated_min, updated_max))
if pr_blacklist is not None:
filters.append(pr_blacklist)
if pr_whitelist is not None:
filters.append(pr_whitelist)
if len(participants) == 1:
if PRParticipationKind.AUTHOR in participants:
filters.append(PullRequest.user_login.in_(
participants[PRParticipationKind.AUTHOR]))
elif PRParticipationKind.MERGER in participants:
filters.append(
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]))
elif len(participants) == 2 and PRParticipationKind.AUTHOR in participants and \
PRParticipationKind.MERGER in participants:
filters.append(sql.or_(
PullRequest.user_login.in_(participants[PRParticipationKind.AUTHOR]),
PullRequest.merged_by_login.in_(participants[PRParticipationKind.MERGER]),
))
if columns is PullRequest:
selected_columns = [PullRequest]
remove_acc_id = False
else:
selected_columns = columns = list(columns)
if remove_acc_id := (PullRequest.acc_id not in selected_columns):
selected_columns.append(PullRequest.acc_id)
if PullRequest.merge_commit_id in columns or PullRequest.merge_commit_sha in columns:
# needed to resolve rebased merge commits
if PullRequest.number not in selected_columns:
selected_columns.append(PullRequest.number)
if labels:
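            # single labels can be matched entirely in SQL; label groups ("multiples") are only
            # pre-filtered here and resolved precisely after fetching the label rows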
singles, multiples = LabelFilter.split(labels.include)
embedded_labels_query = not multiples
if all_in_labels := (set(singles + list(chain.from_iterable(multiples)))):
filters.append(
sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(all_in_labels),
)))
if labels.exclude:
filters.append(
sql.not_(sql.exists().where(sql.and_(
PullRequestLabel.acc_id == PullRequest.acc_id,
PullRequestLabel.pull_request_node_id == PullRequest.node_id,
sql.func.lower(PullRequestLabel.name).in_(labels.exclude),
))))
if not jira:
query = sql.select(selected_columns).where(sql.and_(*filters))
else:
query = await generate_jira_prs_query(
filters, jira, None, mdb, cache, columns=selected_columns)
prs = await read_sql_query(query, mdb, columns, index=PullRequest.node_id.name)
if remove_acc_id:
del prs[PullRequest.acc_id.name]
if PullRequest.closed.name in prs:
cls.adjust_pr_closed_merged_timestamps(prs)
_, first_encounters = np.unique(prs.index.values, return_index=True)
if len(first_encounters) < len(prs):
prs = prs.take(first_encounters)
if not labels or embedded_labels_query:
return prs, None
df_labels = await fetch_labels_to_filter(prs.index, meta_ids, mdb)
left = cls.find_left_by_labels(
prs.index, df_labels.index, df_labels[PullRequestLabel.name.name].values, labels)
prs = prs.take(np.flatnonzero(prs.index.isin(left)))
return prs, df_labels
@staticmethod
def adjust_pr_closed_merged_timestamps(prs_df: pd.DataFrame) -> None:
"""Force set `closed_at` and `merged_at` to NULL if not `closed`. Remove `closed`."""
not_closed = ~prs_df[PullRequest.closed.name].values
prs_df.loc[not_closed, PullRequest.closed_at.name] = pd.NaT
prs_df.loc[not_closed, PullRequest.merged_at.name] = pd.NaT
prs_df.drop(columns=PullRequest.closed.name, inplace=True)
@classmethod
@sentry_span
async def _fetch_inactive_merged_unreleased_prs(
cls,
time_from: datetime,
time_to: datetime,
repos: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
default_branches: Dict[str, str],
release_settings: ReleaseSettings,
has_logical_repos: bool,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client]) -> pd.DataFrame:
node_id_map = await discover_inactive_merged_unreleased_prs(
time_from, time_to, repos, participants, labels, default_branches, release_settings,
prefixer, account, pdb, cache)
if not jira:
return await read_sql_query(sql.select([PullRequest])
.where(PullRequest.node_id.in_(node_id_map)),
mdb, PullRequest, index=PullRequest.node_id.name)
df = await cls.filter_jira(node_id_map, jira, meta_ids, mdb, cache)
if not has_logical_repos:
return df
append = defaultdict(list)
node_ids = df.index.values
repository_full_names = df[PullRequest.repository_full_name.name].values
for i, (pr_node_id, physical_repo) in enumerate(zip(node_ids, repository_full_names)):
logical_repos = node_id_map[pr_node_id]
if physical_repo != (first_logical_repo := logical_repos[0]):
repository_full_names[i] = first_logical_repo
for logical_repo in logical_repos[1:]:
append[logical_repo].append(i)
if append:
chunks = []
for logical_repo, indexes in append.items():
subdf = df.take(indexes)
subdf[PullRequest.repository_full_name.name] = logical_repo
chunks.append(subdf)
            df = pd.concat([df] + chunks)
import pandas as pd
df_ab = pd.DataFrame({'a': ['a_1', 'a_2', 'a_3'], 'b': ['b_1', 'b_2', 'b_3']})
df_ac = pd.DataFrame({'a': ['a_1', 'a_2', 'a_4'], 'c': ['c_1', 'c_2', 'c_4']})
print(df_ab)
# a b
# 0 a_1 b_1
# 1 a_2 b_2
# 2 a_3 b_3
print(df_ac)
# a c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(df_ab.merge(df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_ac_ = df_ac.rename(columns={'a': 'a_'})
print(df_ac_)
# a_ c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_'))
# a b a_ c
# 0 a_1 b_1 a_1 c_1
# 1 a_2 b_2 a_2 c_2
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_').drop(columns='a_'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a', how='inner'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a', how='left'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_3 b_3 NaN
print(pd.merge(df_ab, df_ac, on='a', how='right'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_4 NaN c_4
print(pd.merge(df_ab, df_ac, on='a', how='outer'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_3 b_3 NaN
# 3 a_4 NaN c_4
print(pd.merge(df_ab, df_ac, on='a', how='inner', indicator=True))
# a b c _merge
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
print(pd.merge(df_ab, df_ac, on='a', how='outer', indicator=True))
# a b c _merge
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
# 2 a_3 b_3 NaN left_only
# 3 a_4 NaN c_4 right_only
print(pd.merge(df_ab, df_ac, on='a', how='outer', indicator='indicator'))
# a b c indicator
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
# 2 a_3 b_3 NaN left_only
# 3 a_4 NaN c_4 right_only
df_ac_b = df_ac.rename(columns={'c': 'b'})
print(df_ac_b)
# a b
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac_b, on='a'))
# a b_x b_y
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac_b, on='a', suffixes=['_left', '_right']))
# a b_left b_right
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_abx = df_ab.assign(x=['x_2', 'x_2', 'x_3'])
df_acx = df_ac.assign(x=['x_1', 'x_2', 'x_2'])
print(df_abx)
# a b x
# 0 a_1 b_1 x_2
# 1 a_2 b_2 x_2
# 2 a_3 b_3 x_3
print(df_acx)
# a c x
# 0 a_1 c_1 x_1
# 1 a_2 c_2 x_2
# 2 a_4 c_4 x_2
print(pd.merge(df_abx, df_acx))
# a b x c
# 0 a_2 b_2 x_2 c_2
print(pd.merge(df_abx, df_acx, on=['a', 'x']))
# a b x c
# 0 a_2 b_2 x_2 c_2
print(pd.merge(df_abx, df_acx, on='a'))
# a b x_x c x_y
# 0 a_1 b_1 x_2 c_1 x_1
# 1 a_2 b_2 x_2 c_2 x_2
df_acx_ = df_acx.rename(columns={'x': 'x_'})
print(df_acx_)
# a c x_
# 0 a_1 c_1 x_1
# 1 a_2 c_2 x_2
# 2 a_4 c_4 x_2
print(pd.merge(df_abx, df_acx_, left_on=['a', 'x'], right_on=['a', 'x_']))
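
# An extra illustration (not part of the original notes above): pd.merge() can also join on
# the index instead of a column by passing left_index=True / right_index=True.
df_ab_idx = df_ab.set_index('a')
df_ac_idx = df_ac.set_index('a')
print(pd.merge(df_ab_idx, df_ac_idx, left_index=True, right_index=True))
#        b    c
# a
# a_1  b_1  c_1
# a_2  b_2  c_2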
import pandas as pd
import argparse
import os
import sys
script_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_path+'/src')
from utils import *
from loguru import logger
import numpy as np
############### parameters for the program #################
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input',default = None, help='input file with mobile element id')
parser.add_argument('-t','--thresh', default = None, type =float, help= 'fdr threshold to filter out variants')
#parser.add_argument('-eqtl','--eqtl_genes', default = False, help= 'eQTL genes')
parser.add_argument('-tad', '--tad_domain', default = None, help='custom tad domain file, else will use default file provided')
parser.add_argument('-rr', '--repressive_region', default = script_path+'/data/blood_repressive_marks_state.bed', help ='bed file containing regions with repressive mark')
parser.add_argument('-ar', '--active_region', default = script_path+'/data/blood_active_transcription_marks_state.bed', help ='bed file containing regions with active transcription mark')
parser.add_argument('-gd', '--gene_density', default = None, help='gene density in the tad domains, mean gene density will be used as default')
parser.add_argument('-o', '--output', default='./results', help ='ouput folder name')
parser.add_argument('-br','--blacklist_region', default =None, help='bed file with the coordinates that the user does not want to include in output')
parser.add_argument('-af', '--allele_freq', default = None, type =float, help ='allele frequency threshold for the variant')
parser.add_argument('-hic', '--hic_interaction', default = script_path+'/data/blood_hic_interaction.bed', help='chromatin interaction bed file')
parser.add_argument('-l', '--nearby_cancer_genes', default = 0, help='default = 0, takes number as input representing the distance user wants to check for oncogenes or tumor repressor genes in upstream or downstream of the pMEI')
parser.add_argument('-fname','--file_name', default='result.csv', help='output file name')
args = parser.parse_args()
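# Example invocation (script and file names here are placeholders, not files shipped with the tool):
#   python safe_harbor.py -i pMEI_candidates.txt -t 0.05 -af 0.01 -o ./results -fname result.csv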
if not os.path.exists(args.output):
os.makedirs(os.path.join(args.output, 'temp_files'))
logger_path = os.path.abspath(args.output)
logger.add(logger_path+'/safe_harbor.log', rotation='10 MB',mode='w')
logger.level("WARNING", color="<bold><red>")
logger.info('COMMAND USED:\npython ' + ' '.join(sys.argv) +'\n')
#sys.exit()
dir_ = os.path.abspath(args.output)
if args.input == None:
logger.error('input file not provided. Exiting....')
parser.print_help()
sys.exit(1)
dist = int(args.nearby_cancer_genes)/1000
info = "\n\nParameter information:\n\n" + "FDR threshold:\t{}\n".format(args.thresh) + "Variant Allele frequency:\t{}\n".format(args.allele_freq)
if args.tad_domain == None:
info = info + "tad domain file used:\t{}\n".format(script_path+'/data/merged_gm12878.bed')
else:
info = info + "tad domain file used:\t{}\n".format(args.tad_domain)
info = info + "Repressive region:\t{}\n".format(args.repressive_region) + "Active region:\t{}\n".format(args.active_region) + "Chromatin interaction data:\t{}\n".format(args.hic_interaction)
# tumor repressor and oncogenes list
tumor_repressor_gene = pd.read_csv(script_path+'/data/oncogenes_and_tumor_suppressor_genes.bed',sep='\t')
tumor_repressor_genes_list = tumor_repressor_gene.iloc[:, 3].tolist() #column 3 has name of genes
#list of dosage sensitive genes
#dosage_sensitive_genes = list(filter(None, open('./data/dosage_sensitive_genes.txt','r').read().split('\n')))
dosage_sensitive_genes = pd.read_csv(script_path+'/data/dosage_sensitive_genes.bed', header=None, sep='\t').iloc[:,3].tolist()
# getting the list of TADS that consist of onco genes and tumor supressor genes
if args.tad_domain == None:
    cancer_tad = pd.read_csv(script_path+'/data/cancer_genes_tad.bed',header=None, sep='\t')
#Importing the required packages
from flask import Flask, render_template, request
import os
import pandas as pd
from pandas import ExcelFile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import *
from sklearn.metrics import *
from sklearn.model_selection import cross_val_score
import itertools
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
app = Flask(__name__)
#Routing to initial home page
@app.route('/')
def home():
return render_template('home.html')
@app.route('/admin_login')
def admin_login():
return render_template('admin_login.html')
@app.route('/admin', methods=['GET','POST'])
def admin():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('admin_cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('admin.html')
else:
k=1
return render_template('admin_login.html',k=k)
else:
k=1
return render_template('admin_login.html',k=k)
@app.route('/admin_printed', methods=['GET','POST'])
def admin_printed():
trainfile=request.files['admin_doc']
t=pd.read_excel(trainfile)
t.to_excel('trainfile.xlsx')
return render_template('admin_printed.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
#Routing to page when File Upload is selected
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
test1=pd.read_excel(abc)
test=test1
    train = pd.read_excel('trainfile.xlsx')
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from eslearn.utils.lc_read_write_mat import read_mat, write_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
headmotion_file = r'D:\WorkStation_2018\SZ_classification\Scale\头动参数_1322.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.groupby('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('诊断')['用药'].value_counts()
describe_sex_550 = scale_selected_550.groupby('诊断')['性别'].value_counts()
# Demographic
demographic_info_dataset1 = scale_selected_550[['folder', '诊断', '年龄', '性别', '病程月']]
headmotion = pd.read_excel(headmotion_file)
headmotion = headmotion[['Subject ID','mean FD_Power']]
demographic_info_dataset1 = pd.merge(demographic_info_dataset1, headmotion, left_on='folder', right_on='Subject ID', how='inner')
demographic_info_dataset1 = demographic_info_dataset1.drop(columns=['Subject ID'])
site_dataset1 = pd.DataFrame(np.zeros([len(demographic_info_dataset1),1]))
site_dataset1.columns = ['site']
demographic_dataset1_all = pd.concat([demographic_info_dataset1 , site_dataset1], axis=1)
demographic_dataset1_all.columns = ['ID','Diagnosis', 'Age', 'Sex', 'Duration', 'MeanFD', 'Site']
demographic_dataset1 = demographic_dataset1_all[['ID','Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']]
demographic_dataset1['Diagnosis'] = np.int32(demographic_dataset1['Diagnosis'] == 3)
# Duration and age
demographic_duration_dataset1 = demographic_dataset1_all[['Duration', 'Age']].dropna()
np.corrcoef(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
pearsonr(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
headmotion_file_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\parameters\FD_power'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
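# PANSS subscale totals: positive (P1-P7), negative (N1-N7), general psychopathology (G1-G16)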
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.sum(Nscore, axis=1).describe()
Gscore = pd.DataFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = pd.DataFrame(Gscore)
Gscore = np.sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.groupby('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.groupby('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.groupby('group')['duration'].describe()
describe_sex_206 = scale_data_206.groupby('group')['sex'].value_counts()
# Demographic
uid = pd.DataFrame(scale_data_206['ID'])
uid['ID'] = uid['ID'].str.replace('NC','10');
uid['ID'] = uid['ID'].str.replace('SZ','20');
uid = pd.DataFrame(uid, dtype=np.int32)
demographic_info_dataset2 = scale_data_206[['group','age', 'sex']]
demographic_info_dataset2 = pd.concat([uid, demographic_info_dataset2], axis=1)
headmotion_name_dataset2 = os.listdir(headmotion_file_206)
headmotion_file_path_dataset2 = [os.path.join(headmotion_file_206, name) for name in headmotion_name_dataset2]
meanfd = []
for i, file in enumerate(headmotion_file_path_dataset2):
fd = np.loadtxt(file)
meanfd.append(np.mean(fd))
meanfd_dataset2 = pd.DataFrame(meanfd)
headmotion_name_dataset2 = pd.Series(headmotion_name_dataset2)
headmotion_name_dataset2 = headmotion_name_dataset2.str.findall('(NC.*[0-9]\d*|SZ.*[0-9]\d*)')
headmotion_name_dataset2 = [str(id[0]) if id != [] else 0 for id in headmotion_name_dataset2]
headmotion_name_dataset2 = pd.DataFrame([''.join(id.split('_')) if id != 0 else '0' for id in headmotion_name_dataset2])
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('NC','10');
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('SZ','20');
headmotion_name_dataset2 = pd.DataFrame(headmotion_name_dataset2, dtype=np.int32)
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,ifis_tools//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # database_tools:
#
# Set of tools to connect to the data base, put and get data from them.
import psycopg2
from psycopg2 import sql
import pandas as pd
from datetime import datetime
from climata.usgs import InstantValueIO, DailyValueIO
import numpy as np
from ifis_tools import auxiliar as aux
data_usr = None
data_pass = None
data_host = "s-iihr51.iihr.uiowa.edu"
data_base = "research_environment"
data_port = "5435"
# +
def DataBaseConnect(user = "iihr_student", password = "<PASSWORD>", host = data_host,
port = "5435", database = "research_environment"):
'''Connect to the database that hsa stored the usgs information'''
con = psycopg2.connect(user = user,
password = password,
host = host,
port = port,
database = database)
return con
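# Usage sketch (user, password and query are placeholders, not real credentials):
# con = DataBaseConnect(user='some_user', password='some_password')
# Data = pd.read_sql('SELECT 1 AS ok', con)
# con.close()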
def SQL_getSubLinks(linkid):
'''returns the list of links that belong to a certain link.'''
con = DataBaseConnect(user='nicolas',password='<PASSWORD>',database='rt_precipitation')
query = 'SELECT nodeX.link_id AS link_id FROM students.env_master_km AS nodeX, students.env_master_km AS parentX WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) AND parentX.link_id = '+str(linkid)
Data = pd.read_sql(query, con)
Data = Data.values.T[0]
Data.sort()
con.close()
return Data
def SQL_read_USGS_Streamflow(usgs_id, date1, date2, schema = 'pers_nico',
table = 'data_usgs', time_name = 'unix_time', data_name = 'val', usgs_name = 'usgs_id'):
'''Read streamflow data from IIHR database "research_environment"
and returns it as a pandas.DataFrame element.
Parameters:
- usgs_id: code of the usgs.
- date1: initial date of the query.
- date2: final date of the query.
Optional:
- schema: where to obtain data in the databse.
- table: master table with the usgs data.
- time_name: the name of the column that has the time.
- data_name: the name of the column that has the data.
- usgs_name: the name of the column that has the id of the usgs stations.
Returns:
- pandas.DataFrame containing the streamflow data.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '<PASSWORD>')
#Work with dates and usgs id
date1 = str(aux.__datetime2unix__(date1))
date2 = str(aux.__datetime2unix__(date2))
if type(usgs_id) is not str:
usgs_id = str(usgs_id)
#make the querty
query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+usgs_name+"='"+usgs_id+"'")
#Make the consult.
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
con.close()
return Data
def WEB_Get_USGS(usgs_code, date1, date2, variable = '00060'):
'''Get USGS data from the web using the climdata interface
    Parameters (needs to be tested):
- usgs_code: the code of the station to obtain.
- date1: initial date.
- date2: final date.
- variable:
- 00060 for streamflow.
- 00065 for height'''
    #Get the data from the web
data = InstantValueIO(
start_date = pd.Timestamp(date1),
        end_date = pd.Timestamp(date2)
import sys, re
import pandas as pd, numpy as np
from data_processing import split_wrd, space_fill
def df_format_print(df,file=sys.stdout,index=False,align='c',squeeze=False,uwidth=2,spcwidth=1,kind="simple",margin=None):
lengths = []
if index: df = df.reset_index()
collen = len(df.columns)
delta = uwidth - 1
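    # uwidth is the assumed display width of a full-width (e.g. CJK) character; delta converts
    # the UTF-8 byte-length surplus of such characters into extra display columns below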
# fill align
align = list(align)
if len(align) < collen:
align += align[-1]*(collen - len(align))
# lengths of columns
lengths = df.columns.map(lambda x: int(len(str(x)) + delta*(len(x.encode('utf-8')) - len(x))//2))
dfshap = df.copy()
# lenths of values
for c in range(len(dfshap.columns)):
dfshap.iloc[:,c] = dfshap.iloc[:,c].map(lambda x: int(len(str(x)) + delta*(len(x.encode('utf-8')) - len(x))//2))
if kind=="normal":
if not margin: margin = 0
lengths = np.max([lengths,dfshap.max()],axis=0)+margin
print('+'+'+'.join(['-'*i for i in lengths])+'+',file=file)
dcfl = [space_fill(df.columns[i],length=lengths[i],align=align[i],uwidth=uwidth,spcwidth=spcwidth) for i in range(collen)]
print('|'+'|'.join(dcfl)+'|',file=file)
print('+'+'+'.join(['='*i for i in lengths])+'+',file=file)
        ddsm = np.array([df.iloc[:,c].map(lambda x: True if x in '|-' else False) for c in range(collen)])
        ddfl = [df.iloc[:,c].map(lambda x: space_fill(x,lengths[c],align[c],uwidth=uwidth,spcwidth=spcwidth) if x not in '|-' else ' '*lengths[c]) for c in range(collen)]
if squeeze: ddflout = '\n'.join(['|'+'|'.join(i)+'|' for i in pd.DataFrame(ddfl).T.values])
#else: ddflout = ('\n+'+'+'.join(['-'*i for i in lengths])+'+\n')\
#.join(['|'+'|'.join(i)+'|' for i in pd.DataFrame(ddfl).T.values])
else:
dfproc = pd.DataFrame(ddfl).T.values
ddflout = ['|'+'|'.join(dfproc[0])+'|']
for ind in range(1,len(df)):
#ddflout.append('+'+'+'.join([(' ' if ddsm[j][ind] else '-')*i for i,j in zip(lengths,range(ddsm.shape[0]))])+'+')
line = '|' if ddsm[0][ind] else '+'
for i,j in zip(lengths,range(ddsm.shape[0]-1)):
line += (' ' if ddsm[j][ind] else '-')*i + ('|' if (ddsm[j][ind] and ddsm[j+1][ind]) else '+')
line += (' ' if ddsm[j+1][ind] else '-')*lengths[-1] + ('|' if ddsm[j+1][ind] else '+')
ddflout.append(line)
ddflout.append('|'+'|'.join(dfproc[ind])+'|')
ddflout = '\n'.join(ddflout)
print(ddflout,file=file)
print('+'+'+'.join(['-'*i for i in lengths])+'+',file=file)
elif kind=="simple":
if not margin: margin = 2
lengths = np.max([lengths,dfshap.max()],axis=0)+margin
print(' '.join(['-'*i for i in lengths]),file=file)
dcfl = [space_fill(df.columns[i],length=lengths[i],align=align[i],uwidth=uwidth,spcwidth=spcwidth) for i in range(collen)]
print(' '.join(dcfl),file=file)
print(' '.join(['-'*i for i in lengths]),file=file)
        ddfl = [df.iloc[:,c].map(lambda x: space_fill(x,lengths[c],align[c],uwidth=uwidth,spcwidth=spcwidth)) for c in range(collen)]
        if squeeze: ddflout = '\n'.join([' '.join(i) for i in pd.DataFrame(ddfl).T.values])
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillClimbingFilter:
def test1_high_beta(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_generator_data_low_beta(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
input_df = specific_relation_generator(
df, columns=['link'], hierarchy_relation='http://www.w3.org/2004/02/skos/core#broader')
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test2_expected.csv")
output_df = hill_climbing_filter(input_df, 'link_in_boolean_http://dbpedia.org/resource/Category:Prefectures_in_France', beta=0.05, k=3)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_expected.csv")
output_df = hill_climbing_filter(input_df, 'class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_callable_function(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
def fake_metric(df, class_col, param=5):
return 1/((df.sum(axis=1)*class_col).sum()/param)
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test4_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', metric=fake_metric, G= input_DG, param=6)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test5_no_graph(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
with pytest.raises(RuntimeError) as excinfo:
_ = hill_climbing_filter(input_df, 'class', beta=0.5, k=2)
assert "df.attrs['hierarchy]" in str(excinfo.value)
class TestHierarchyBasedFilter():
def test1_no_pruning_info_gain_with_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test1_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="info_gain", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_no_pruning_correlation(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test2_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
        input_DG = input_df.attrs['hierarchy']
        output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, G=input_DG, metric="correlation", pruning=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_pruning_info_gain_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test3_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test4_pruning_correlation_all_remove_True(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test4_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test5_pruning_info_gain_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test5_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="info_gain", pruning=True, all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test6_pruning_correlation_all_remove_False(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test6_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
input_DG = input_df.attrs['hierarchy']
output_df = hierarchy_based_filter(input_df, "link", G=input_DG, threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test7_no_input_G(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test7_expected.csv")
input_df = direct_type_generator(df, ["link"], regex_filter=['A'], result_type="boolean", bundled_mode=True, hierarchy=True)
output_df = hierarchy_based_filter(input_df, "link", threshold=0.99, metric="correlation", pruning=True,
all_remove=False)
pd.testing.assert_frame_equal(output_df, expected_df, check_like = True)
def test8_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
        expected_df = pd.read_csv("test/data/feature_selection/hierarchy_based_test8_expected.csv")
# internal modules
import os
from typing import Tuple
from app_logic import dataframe_creation
from data_structures.annotation_data import AnnotationData
from data_structures.raw_data import RawData
# python modules
import logging
# dependencies
import numpy as np
import pandas as pd
# DEFINITIONS
from util.definitions import BASELINE_NAME, EVENT_TYPE, HUMAN_RATING_LABEL, SLEEP_CLASSIFIERS, definitions_as_string
from util import settings
class PSG:
""" Perform automated detection of motoric arousals during REM sleep consistent with RBD.
:param input_path: absolute path to directory that contains an EDF file to be evaluated
and all relevant annotation files
:param output_path: absolute path to directory in which to create the result files
"""
def __init__(self, input_path: str = '', output_path: str = ''):
self._input_path = input_path
self._output_path = output_path
self._raw_data: RawData = None # content of edf file
self._annotation_data: AnnotationData = None # content of txt files
self._calculated_data: pd.DataFrame = None # dataframe with all currently calculated data
logging.info('Definitions:\n'
f'{str(definitions_as_string())}\n'
f'{str(settings.settings_as_string())}'
)
logging.debug('New PSG Object created')
# PUBLIC FUNCTIONS
@property
def input_path(self):
return self._input_path
@input_path.setter
def input_path(self, input_path):
self._input_path = input_path
@property
def output_path(self):
return self._output_path
@output_path.setter
def output_path(self, output_path):
self._output_path = output_path
@staticmethod
def prepare_evaluation(raw_data, annotation_data, signal_names, assess_flow_events):
signal_names = signal_names.copy()
# extract start of PSG, sample rate of chin EMG channel and number of chin EMG samples to create datetime index
start_datetime = raw_data.get_header()['startdate']
sample_rate = raw_data.get_data_channels()[settings.SIGNALS_TO_EVALUATE[0]].get_sample_rate()
sample_length = len(raw_data.get_data_channels()[settings.SIGNALS_TO_EVALUATE[0]].get_signal())
# prepare DataFrame with DatetimeIndex
preliminary_idx = dataframe_creation.create_datetime_index(start_datetime, sample_rate, sample_length)
# df = pd.DataFrame(index=preliminary_idx)
# add sleep profile to df
df = pd.concat(PSG.create_sleep_profile_column(preliminary_idx, annotation_data), axis=1)
# add signals to DataFrame
for signal_type in signal_names.copy():
logging.debug(signal_type + ' start')
# Check if signal type exists in edf file
try:
signal_array = raw_data.get_data_channels()[signal_type].get_signal()
except KeyError:
signal_names.remove(signal_type)
continue
# Resample to 256 Hz and add to df
sample_rate = raw_data.get_data_channels()[signal_type].get_sample_rate()
df[signal_type] = dataframe_creation.signal_to_hz_rate_datetimeindexed_series(
settings.RATE, sample_rate, signal_array, signal_type, start_datetime)
# add global artifacts to df
df['is_global_artifact'] = PSG.add_artifacts_to_df(df.index, annotation_data, assess_flow_events)
return df[signal_names], df['is_REM'], df['is_global_artifact'], signal_names, df['sleep_phase']
@staticmethod
def find_artifact_free_REM_sleep_epochs_and_miniepochs(idx: pd.DatetimeIndex,
artifact_signal_series: pd.Series,
is_REM_series: pd.Series):
"""
:param idx:
:param artifact_signal_series:
:param is_REM_series:
:return: Tuple of pandas Series (Artifact-free REM epochs, Artifact-free REM miniepochs)
"""
df = pd.DataFrame(index=idx)
artifact_in_3s_miniepoch = artifact_signal_series \
.resample('3s') \
.sum() \
.gt(0)
df['miniepoch_contains_artifact'] = artifact_in_3s_miniepoch
df['miniepoch_contains_artifact'] = df['miniepoch_contains_artifact'].ffill()
df['artifact_free_rem_sleep_miniepoch'] = is_REM_series & ~df['miniepoch_contains_artifact']
# find all 30s epochs of global artifact-free REM sleep for tonic event detection
artifact_in_30s_epoch = artifact_signal_series \
.resample('30s') \
.sum() \
.gt(0)
df['epoch_contains_artifact'] = artifact_in_30s_epoch
df['epoch_contains_artifact'] = df['epoch_contains_artifact'].ffill()
df['artifact_free_rem_sleep_epoch'] = is_REM_series & ~df['epoch_contains_artifact']
return df['artifact_free_rem_sleep_epoch'], df['artifact_free_rem_sleep_miniepoch']
@staticmethod
def prepare_human_signal_artifacts(annotation_data, index, signal_names):
"""
:param annotation_data:
:param index:
:param signal_names:
:return: pd.DataFrame with index 'index' containing 3 columns per signal in 'signal_names'.
Their column names follow the following schematic:
- '<signal_name>_human_artifact' (artifacts of either human rater)
- '<signal_name>_human1_artifact' (artifacts of human rater 1)
- '<signal_name>_human2_artifact' (artifacts of human rater 2)
"""
try:
# process human rating for artifact evaluation per signal and event
human_rating1 = annotation_data.human_rating[0][1]
human_rating_label_dict1 = human_rating1.groupby('event').groups
logging.debug(human_rating_label_dict1)
except IndexError as e:
raise FileNotFoundError("Human rating does not exist.")
try:
# process second human rating for artifact extraction per signal and event
human_rating2 = annotation_data.human_rating[1][1]
human_rating_label_dict2 = human_rating2.groupby('event').groups
logging.debug(human_rating_label_dict2)
except IndexError as e:
logging.info("Only one human rater file found.")
human_rating2 = None
df_artifacts = pd.DataFrame(index=index)
for signal_type in signal_names.copy():
#add human rating boolean arrays
df_artifacts[signal_type + '_human1_artifact'] = \
PSG.transform_human_artifact_rating_for_signal_type_to_series(
human_rating1, human_rating_label_dict1, signal_type, df_artifacts.index)
df_artifacts[signal_type + '_human_artifact'] = df_artifacts[signal_type + '_human1_artifact']
if human_rating2 is not None:
df_artifacts[signal_type + '_human2_artifact'] = \
PSG.transform_human_artifact_rating_for_signal_type_to_series(
human_rating2, human_rating_label_dict2, signal_type, df_artifacts.index)
# merge artifacts of both raters
df_artifacts[signal_type + '_human_artifact'] = np.logical_or(
df_artifacts[signal_type + '_human1_artifact'],
df_artifacts[signal_type + '_human2_artifact']
)
return df_artifacts
@staticmethod
def find_signal_artifact_free_REM_sleep_epochs_and_miniepochs(index, is_REM_series, is_global_artifact_series,
signal_artifacts, signal_names):
df = pd.DataFrame(index=index)
df_help = pd.DataFrame(index=index)
signal_artifacts_used = 1
# find artifact-free REM sleep miniepochs per signal:
for signal_name in signal_names:
# global and signal artifacts
if signal_artifacts is not None:
artifact_signal_series = np.logical_or(
is_global_artifact_series, signal_artifacts[signal_name + '_signal_artifact'])
else:
artifact_signal_series = is_global_artifact_series
signal_artifacts_used = 0
df[signal_name + '_artifact_free_rem_sleep_epoch'], df[signal_name + '_artifact_free_rem_sleep_miniepoch']\
= PSG.find_artifact_free_REM_sleep_epochs_and_miniepochs(
index, artifact_signal_series=artifact_signal_series, is_REM_series=is_REM_series)
if not signal_artifacts_used:
logging.info('No special signal artifacts were used. All signals use global artifacts only.')
return df
@staticmethod
def find_baselines(df_signals, signal_names, use_human_baselines=False, is_rem_series=None,
artifact_free_rem_sleep_per_signal=None,
annotation_data=None):
"""
Finds baseline per signal. If human baseline is used, annotation_data must be given.
If human baseline is not used, the respective baselines per signal are calculated. In this case, is_rem_series and
artifact_free_rem_sleep_per_signal must exist.
:param df_signals:
:param signal_names:
:param use_human_baselines:
:param artifact_free_rem_sleep_per_signal:
:param annotation_data:
:return:
"""
df_baselines = pd.DataFrame(index=df_signals.index)
df_baseline_artifacts = pd.DataFrame(index=df_signals.index)
if use_human_baselines:
for signal_name in signal_names:
# add signal type baseline column
df_baselines[signal_name + '_baseline'] = PSG.add_signal_baseline_to_df(df_signals, annotation_data,
signal_name)
else:
for signal_name in signal_names:
# TODO: move to settings
MIN_REM_BLOCK_LENGTH_IN_S = 150
MIN_BASELINE_VOLTAGE = 0.05
baseline_time_window_in_s = 30
baseline_artifact_window_in_s = 5
# find REM blocks
all_rem_sleep_numbered, min_rem_block, numbered_rem_blocks, small_rem_pieces = PSG.find_rem_blocks(
MIN_REM_BLOCK_LENGTH_IN_S, is_rem_series)
# detect baseline artifacts
baseline_artifact = df_signals.loc[is_rem_series, signal_name].pow(2) \
.rolling(str(baseline_artifact_window_in_s) + 'S',
min_periods=settings.RATE * baseline_artifact_window_in_s) \
.mean() \
.apply(np.sqrt) \
.lt(MIN_BASELINE_VOLTAGE)
df_baseline_artifacts[signal_name + '_baseline_artifact'] = baseline_artifact
df_baseline_artifacts[signal_name + '_baseline_artifact'] = \
df_baseline_artifacts[signal_name + '_baseline_artifact'].ffill().fillna(False)
block_baselines = pd.Series(index=range(1, all_rem_sleep_numbered.max() + 1))
while True:
# calculate baseline per REM block
for block_number in block_baselines.index.values:
# find artifact-free REM block
rem_block = df_signals.loc[numbered_rem_blocks == block_number, signal_name]
artifact_free_rem_block = \
rem_block.loc[artifact_free_rem_sleep_per_signal[
signal_name + '_artifact_free_rem_sleep_miniepoch']]
artifact_free_rem_block = \
artifact_free_rem_block[~df_baseline_artifacts[signal_name + '_baseline_artifact']]
# find baseline
baseline_in_rolling_window = artifact_free_rem_block.pow(2)\
.rolling(str(baseline_time_window_in_s) + 'S',
min_periods=settings.RATE * baseline_time_window_in_s)\
.mean() \
.apply(np.sqrt)
if (baseline_in_rolling_window is not None) and baseline_in_rolling_window.empty:
min_baseline_for_block = np.nan
else:
min_baseline_for_block = np.nanmin(baseline_in_rolling_window)
block_baselines[block_number] = min_baseline_for_block
block_baselines = block_baselines.ffill().bfill()
if block_baselines.isna().any():
if baseline_time_window_in_s != 15:
baseline_time_window_in_s = 15
continue
else:
logging.info(f'For signal {signal_name} a baseline cannot be calculated.')
block_baselines = None
break
else:
break
if block_baselines is None:
continue
for block_number in block_baselines.index.values:
rem_block = df_signals.loc[all_rem_sleep_numbered == block_number, signal_name]
df_baselines.loc[rem_block.index, signal_name + '_baseline'] = block_baselines[block_number]
return df_baselines, df_baseline_artifacts
@staticmethod
def detect_rbd_events(df_signals, df_baselines, artifact_free_rem_sleep_per_signal, signal_names, annotation_data):
"""
Detects RBD events in the prepared signals.
:return: Calculation results in DataFrame
"""
df = | pd.concat([df_signals, df_baselines], axis=1) | pandas.concat |
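# --- Editor's sketch (not part of the PSG module above): the miniepoch logic in
# find_artifact_free_REM_sleep_epochs_and_miniepochs resamples a boolean artifact
# series, flags every 3 s window that contains at least one artifact sample, and
# forward-fills that flag back onto the dense index. A minimal, self-contained
# reproduction of that pattern on synthetic data (all names here are illustrative):
import pandas as pd

idx = pd.date_range("2021-01-01 22:00:00", periods=12 * 256, freq=pd.Timedelta(seconds=1 / 256))
artifact = pd.Series(False, index=idx)
artifact.iloc[1000:1010] = True  # a short artifact burst inside one miniepoch
flag_3s = artifact.resample("3s").sum().gt(0)  # one boolean per 3 s miniepoch
demo = pd.DataFrame(index=idx)
demo["miniepoch_contains_artifact"] = flag_3s  # sparse: set only at miniepoch start timestamps
demo["miniepoch_contains_artifact"] = demo["miniepoch_contains_artifact"].ffill()
print(demo["miniepoch_contains_artifact"].sum())  # count of samples falling in flagged miniepochs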
from typing import List
import datetime
import requests
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.rcsetup import cycler
import pandas as pd
DATA_GOUV_2_OPEN = {
"date": "date",
"granularite": "granularite",
"maille_code": "maille_code",
"maille_nom": "maille_nom",
"rea": "reanimation",
"hosp": "hospitalises",
"dchosp": "deces",
"incid_hosp": "nouvelles_hospitalisations",
"incid_rea": "nouvelles_reanimations",
"conf": "cas_confirmes",
"esms_dc": "deces_ehpad",
"esms_cas": "cas_confirmes_ehpad",
"source_url": "source_url",
}
def download_france_data() -> pd.DataFrame:
"""Download and merges data from OpenCovid19-fr and data.gouv.fr
"""
oc19_file = "opencovid19-fr-chiffres-cles.csv"
gouv_file = "data-gouv-fr-chiffres-cles.csv"
oc19_url = "https://raw.githubusercontent.com/opencovid19-fr/data/master/dist/chiffres-cles.csv"
gouv_url = (
"https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617"
)
# run requests to download and save the data
myfile = requests.get(oc19_url)
with open(oc19_file, "wb") as f:
f.write(myfile.content)
file = requests.get(gouv_url)
with open(gouv_file, "wb") as f:
f.write(file.content)
# Load both csv into pandas
data = pd.read_csv(oc19_file)
data_gouv = | pd.read_csv(gouv_file) | pandas.read_csv |
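# --- Editor's sketch (assumption, not part of download_france_data above): the
# DATA_GOUV_2_OPEN mapping defined earlier pairs data.gouv.fr column names with the
# OpenCovid19-fr names, so a plausible follow-up step is a plain column rename before
# combining the two sources. Demonstrated on a tiny synthetic frame with a subset of
# that mapping; the real function body shown here stops after the downloads.
import pandas as pd

demo_gouv = pd.DataFrame({"date": ["2021-01-01"], "rea": [1], "hosp": [2], "dchosp": [3]})
demo_map = {"date": "date", "rea": "reanimation", "hosp": "hospitalises", "dchosp": "deces"}
print(demo_gouv.rename(columns=demo_map).columns.tolist())
# ['date', 'reanimation', 'hospitalises', 'deces']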
import pandas as pd
from functools import reduce
from fooltrader.contract.files_contract import *
import re
import json
class agg_future_dayk(object):
funcs={}
def __init__(self):
self.funcs['shfeh']=self.getShfeHisData
self.funcs['shfec']=self.getShfeCurrentYearData
self.funcs['ineh']=self.getIneHisData
self.funcs['inec']=self.getIneCurrentYearData
self.funcs['dceh']=self.getDceHisData
self.funcs['dcec']=self.getDceCurrentYearData
self.funcs['czceh']=self.getCzceHisData
self.funcs['czcec']=self.getCzceCurrentYearData
self.funcs['cffexh']=self.getCffexHisData
self.funcs['cffexc']=self.getCffexCurrentYearData
def getCurrentYearAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getHisData(x),exchanges))+list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getHisData(exchange),self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getHisData(self,exchange):
return self.funcs[exchange+'h']()
def getCurrentYearData(self,exchange):
return self.funcs[exchange+'c']()
def getShfeHisData(self):
pattern = re.compile(r'(\D{1,3})(\d{3,4}).*')
dfs=[]
dir = get_exchange_cache_dir(security_type='future',exchange='shfe')+"/his/"
for j in os.listdir(dir):
a = pd.read_excel(dir+j, header=2, skipfooter=5,
usecols=list(range(0, 14))).fillna(method='ffill')
dfs.append(a)
totaldf = reduce(lambda x,y:x.append(y),dfs)
totaldf['日期']=pd.to_datetime(totaldf['日期'],format='%Y%m%d')
totaldf=totaldf[pd.isnull(totaldf['合约'])==False]
totaldf['fproduct'] = totaldf['合约'].apply(lambda x:pattern.match(x).groups()[0])
totaldf['settleDate'] = totaldf['合约'].apply(lambda x:pd.to_datetime('20'+pattern.match(x).groups()[1],format='%Y%m'))
renameMap={
'合约':'symbol',
'日期':'date',
'前收盘':'preClose',
'前结算':'preSettle',
'开盘价':'open',
'最高价':'high',
'最低价':'low',
'收盘价':'close',
'结算价':'settle',
'涨跌1':'range',
'涨跌2':'range2',
'成交量':'volume',
'成交金额':'amount',
'持仓量':'inventory'
}
totaldf.rename(index=str,columns=renameMap,inplace=True)
totaldf=totaldf[['symbol','date','open','high','low','close','settle','range','range2','volume','inventory','fproduct','settleDate']]
print("done")
# totaldf.to_pickle('testdf.pickle')
return totaldf
def getShfeCurrentYearData(self):
dir = os.path.join(get_exchange_cache_dir(security_type='future',exchange='shfe'),"2020_day_kdata")
file_list=os.listdir(dir)
tempdfs=[]
for file in file_list:
if len(file)==8:
with open(os.path.join(dir,file)) as f:
load_dict = json.load(f)
temp_df = | pd.DataFrame(data=load_dict['o_curinstrument']) | pandas.DataFrame |
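# --- Editor's sketch (illustrative only): getAllData / getCurrentYearAllData above
# strip thousands separators before converting 'volume' and 'inventory' to numbers.
# The same idiom on a standalone frame:
import pandas as pd

demo = pd.DataFrame({"volume": ["1,234", "56,789"], "inventory": ["10", "2,000"]})
for col in ["volume", "inventory"]:
    demo[col] = demo[col].apply(lambda x: pd.to_numeric(str(x).replace(",", "")))
print(demo.dtypes)  # both columns become int64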
__all__ = [
"tran_shapley_cohort",
"tf_shapley_cohort",
]
from grama import add_pipe, pipe
from itertools import chain, combinations
from numpy import all, number, sum, zeros, empty, NaN
from pandas import concat, DataFrame
from scipy.special import comb
from toolz import curry
## Helper
def powerset(iterable):
s = list(iterable)
return list(chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)))
## Cohort Shapley
@curry
def tran_shapley_cohort(df, var=None, out=None, bins=20, inds=None):
"""Compute cohort shapley values
Assess the impact of each variable on selected observations via cohort
shapley [1]. Shapley values are a game-theoretic way to assess the
importance of input variables (var) on each of a set of outputs (out). Since
values are computed on each observation, cohort shapley can distinguish
cases where a variable has a positive impact on one observation, and a
negative impact on a different observation.
Note that cohort shapley is combinatorially expensive in the number of
variables, and this expense is multiplied by the number of observations. Use
with caution in cases of high dimensionality. Consider using the `inds`
argument to analyze a small subset of your observations.
Args:
df (DataFrame): Variable and output data to analyze
var (list of strings): Input variables
out (list of strings): Outputs variables
bins (integer): Number of "bins" to define coordinate refinement distance
inds (iterable of indices or None): Indices of rows to analyze
References:
- [1] Mase, Owen, and Seiler, "Explaining black box decisions by Shapley cohort refinement" (2019) Arxiv
Examples:
>>> import grama as gr
>>> from grama.data import df_stang
>>> X = gr.Intention()
>>> # Analyze all observations
>>> (
>>> gr.tran_shapley_cohort(
>>> df_stang,
>>> var=["thick", "ang"],
>>> out=["E"],
>>> )
>>> >> gr.tf_bind_cols(df_stang)
>>> >> gr.tf_filter(X.E_thick < 0)
>>> )
>>> # Compute subset of values
>>> (
>>> gr.tran_shapley_cohort(
>>> df_stang,
>>> var=["thick", "ang"],
>>> out=["E"],
>>> inds=(
>>> df_stang
>>> >> gr.tf_filter(X.thick > 0.08)
>>> ).index
>>> )
>>> >> gr.tf_bind_cols(df_stang)
>>> )
"""
## Check invariants
if not set(var).issubset(set(df.columns)):
raise ValueError("var must be subset of df.columns")
if not set(out).issubset(set(df.columns)):
raise ValueError("out must be subset of df.columns")
if len(set(var).intersection(set(out))) != 0:
raise ValueError("var and out must have empty intersection")
if inds is None:
inds = range(df.shape[0])
## Setup
s = df.shape[0] # Number of observations (subjects)
n = len(var)
# Determine numeric and categorical columns
var_numeric = list(df[var].select_dtypes(include=[number]).columns)
var_cat = list(df[var].drop(columns=var_numeric).columns)
# Compute distances for coordinate similarity
df_dist = DataFrame(
data={col: [(df[col].max() - df[col].min()) / bins] for col in var_numeric}
)
# Compute coordinate similarity boolean DataFrame
df_sim = DataFrame(columns=["_i0", "_i1"] + list(df[var].columns))
for i in range(s):
## Numeric comparison
df_tmp = (
df[var_numeric].iloc[i] - df[var_numeric].iloc[(i + 1) :]
).abs() <= df_dist[var_numeric].values
## Categorical comparison
df_tmp[var_cat] = df[var_cat].iloc[i] == df[var_cat].iloc[(i + 1) :]
## Add subject indices
df_tmp["_i0"] = [i] * df_tmp.shape[0]
df_tmp["_i1"] = range((i + 1), s)
## Concatenate
df_sim = concat((df_sim, df_tmp), axis=0, sort=False)
# Internal functions
def cohort_indices(t, varset):
"""Build set of cohort indices
Args:
t (integer): Target sample index
varset (iterable): Variables for cohort refinement
"""
if len(varset) == 0:
return list(range(s))
# Find all pairs similar along given variables
flags_cohort = all(
df_sim.drop(columns=["_i0", "_i1"])[[var[i] for i in varset]], axis=1
).values
# Filter to pairs including target t
df_tmp = df_sim[flags_cohort]
df_cohort = df_tmp[(df_tmp["_i0"] == t) | (df_tmp["_i1"] == t)]
# Consolidate index set
return list(
set(df_cohort["_i0"]).union(set(df_cohort["_i1"])).union(set((t,)))
)
def cohort_mean(t, varset):
c = cohort_indices(t, varset)
return df[out].iloc[c].mean().to_frame().T
def cohort_shapley(j):
"""Cohort shapley for all observations, single variable
"""
poset = powerset(set(range(n)).difference({j}))
data = zeros((s, len(out)))
df_tmp = DataFrame(columns=out, data=data)
for p in poset:
den = n * comb(n - 1, len(p))
for t in range(s):
if t in inds:
t1 = cohort_mean(t, list(set(p).union({j})))
t0 = cohort_mean(t, p)
df_tmp.iloc[t] = df_tmp.iloc[t] + (t1 - t0).loc[0] / den
else:
df_tmp.iloc[t] = NaN
return df_tmp
## Compute cohort shapley over all variables
df_res = | DataFrame() | pandas.DataFrame |
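# --- Editor's sketch (illustrative only): the inner loop of cohort_shapley above
# weights each subset p (not containing variable j) by 1 / (n * C(n-1, |p|)).
# A quick standalone check that these weights sum to one, reusing the same powerset idea:
from itertools import chain, combinations
from scipy.special import comb

def _powerset(iterable):
    s = list(iterable)
    return list(chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)))

n, j = 3, 0  # three input variables, Shapley value of variable 0
weights = [1.0 / (n * comb(n - 1, len(p))) for p in _powerset(set(range(n)) - {j})]
print(sum(weights))  # ≈ 1.0: Shapley weights over all subsets sum to one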
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE(self):
return 0
class FEg30eom0110network(FEbase):
#this version changes to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag whether the stock is at its daily price limit (trading stop)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
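# --- Editor's sketch (assumption): rollingRankSciPyB is referenced throughout this
# module but not defined in this excerpt. A common implementation of such a rolling
# "position of the newest value within the last 20 observations" helper looks like the
# hypothetical function below; it is a stand-in, not the project's actual definition.
import pandas as pd
from scipy.stats import rankdata

def rolling_rank_last(window_values):
    # percentile position of the newest value inside its rolling window, in (0, 1]
    ranks = rankdata(window_values)
    return ranks[-1] / len(window_values)

demo = pd.Series([3.0, 1.0, 2.0, 5.0, 4.0])
print(demo.rolling(3).apply(rolling_rank_last))  # NaN, NaN, then roughly 0.67, 1.0, 0.67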
class FEg30eom0110onlinew6d(FEbase):
#this version changes to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag whether the stock is at its daily price limit (trading stop)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
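# --- Editor's sketch (assumption): FEsingle is an external helper module that is not
# shown in this excerpt. Judging from how its output is consumed above (PctChgSum(df, n)
# yields a 'pct_chg_<n>' column per ts_code), a hypothetical stand-alone equivalent
# could look like this; it is a guess at the intent, not the project's actual code.
import pandas as pd

def pct_chg_sum(df, window, group_col="ts_code", src_col="pct_chg"):
    out = df.copy()
    out[f"{src_col}_{window}"] = (
        out.groupby(group_col)[src_col]
        .rolling(window)
        .sum()
        .reset_index(0, drop=True)
    )
    return out

demo = pd.DataFrame({"ts_code": ["A"] * 4, "pct_chg": [1.0, -2.0, 3.0, 0.5]})
print(pct_chg_sum(demo, 3)["pct_chg_3"].tolist())  # [nan, nan, 2.0, 1.5]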
class FE_a23(FEbase):
#this version changes to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag whether the stock is at its daily price limit (trading stop)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose share price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here: double-check this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to text
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor prefix is required here
print(df_all)
##exclude STAR Market (688) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#flag whether the stock is at its daily price limit (trading stop)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios (open/high/low)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market value is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
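# --- Editor's sketch (illustrative only): real_FE above normalises ts_code by casting
# to str and left-padding to six digits; the same two steps on standalone data:
import pandas as pd

codes = pd.Series([600519, 519, 300750])
codes = codes.astype(str).str.zfill(6)
print(codes.tolist())  # ['600519', '000519', '300750']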
class FE_a29(FEbase):
#this version changes to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude STAR Market (688) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#flag whether the stock is at its daily price limit (trading stop)
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###real price range (to distinguish high- vs. low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day change features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
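# Above: open/high/low are converted to percentage gaps versus pre_close and then replaced by their
# cross-sectional percentile rank per trade_date; pct_chg_r keeps a raw copy of pct_chg so the
# close-to-close move can go through OldFeaturesRank below alongside the ranked prices.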
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
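# Assumption: FEsingle.PredictDaysTrend(df_all, 5) builds the forward 5-day trend label used as the
# training target; the raw pct_chg column is dropped right below, leaving only its ranked/summed
# derivatives as features.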
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
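# Keep only tradable, regular names: rows at the approximate limit-up are dropped via high_stop == 0,
# and st_or_otherwrong == 1 keeps the stocks flagged as normal (non-ST) by the up/down-limit check
# performed when that flag was created.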
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int values to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## exclude the STAR Market (688) board
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
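# class1 encodes the listing board from the ts_code prefix: 30* ChiNext -> 1, 60* Shanghai main
# board -> 2, 00* Shenzhen main board -> 3; everything else (688 STAR Market is excluded above) stays 0.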
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
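# Live-data workaround (assumption): today's adj_factor may not be available yet, so today's adjusted
# price is approximated from yesterday's adjusted close scaled by today's pct_chg; the fillna(0) above
# zeroes rows that still have no adjustment factor at all.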
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
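# Presumably the extra 4.8%-5.2% band flags stocks trading under a 5% daily limit (e.g. ST names),
# so near-limit moves of both the 10% and 5% regimes are treated as untradable in the live pipeline.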
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop low-priced stocks (original comment said "low market cap"; the filter below is on close)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
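# For live inference only the latest trading day is kept, so today_train.csv holds just today's
# cross-section of features.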
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_Volatility(FEbase):
# this version switches to a 3-day prediction (note: PredictDaysTrend below is still called with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
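# Assumption: FEsingle.InputChgSum aggregates (likely rolling-sums) each money-flow series per ts_code
# over 5/12/25-day windows, producing columns such as sm_amount_5 ... net_mf_amount_25 (the commented-out
# diff lines below reference those names).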
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
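# down_limit/up_limit is roughly 0.9/1.1 = 0.82 for a regular +-10% stock (inside the 0.58-0.85 band -> flag 1)
# and roughly 0.95/1.05 = 0.90 for an ST +-5% stock (outside -> flag 0); despite its name, st_or_otherwrong == 1
# therefore marks the regular, non-ST names that the later filter keeps.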
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## exclude the STAR Market (688) board
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### real price range (distinguishes actually high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int values to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## exclude the STAR Market (688) board
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop low-priced stocks (original comment said "low market cap"; the filter below is on close)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31(FEbase):
# this version switches to a 3-day prediction (note: PredictDaysTrend below is still called with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## exclude the STAR Market (688) board
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### real price range (distinguishes actually high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
# question mark here: revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int values to strings
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required for string methods
print(df_all)
## exclude the STAR Market (688) board
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
# flag limit-up days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
# drop low-priced stocks (original comment said "low market cap"; the filter below is on close)
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
# columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a31_full(FEbase):
# this version switches to a 3-day prediction (note: PredictDaysTrend below is still called with 5)
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
# flag ST or otherwise abnormal stocks (disabled in this variant)
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
## exclude the STAR Market (688) board
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
# price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
# flag limit-up days (disabled in this variant)
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
### real price range (distinguishes actually high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
# 1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
# compute ranks for the three price ratios (open/high/low vs pre_close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required when using string methods
print(df_all)
##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
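#class1 encodes the listing board from the ts_code prefix:
#30 -> ChiNext, 60 -> Shanghai main board, 00 -> Shenzhen main board (688 / STAR excluded above).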
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
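#real_price is rebuilt from the previous day's adjusted close times (1 + today's pct_chg/100),
#presumably because today's adj_factor may not yet be available in the real-time feed.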
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
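#rank(pct=True) yields a percentile in (0, 1]; multiplying by 19.9 and flooring
#buckets total_mv_rank into integer bins 0..19.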
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
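#high_stop flags near-limit moves: pct_chg above 9.4 (~10% limit board) or
#between 4.8 and 5.2 (~5% limit board, e.g. ST stocks).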
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
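#the loop above converts open/high/low into percent changes vs. pre_close,
#then replaces each with its cross-sectional (per trade_date) percentile rank.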
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market cap is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_a29_full(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
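#InputChgSum is assumed to accumulate each money-flow column over rolling
#5/12/25-day windows, producing the corresponding *_5 / *_12 / *_25 features.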
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
#df_all['st_or_otherwrong']=0
#df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
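#dayofweek: parse trade_date (YYYYMMDD) and keep only the weekday number (0=Monday) as a feature.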
##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
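#the *_diff features above compare ranks across window lengths (25 vs 12, 12 vs 5),
#capturing how short-window positioning differs from the longer-window one.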
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#hit price limit or not
#df_all['high_stop']=0
#df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###actual price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']<6]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
#df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['amount','close','real_price'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required when using string methods
print(df_all)
##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market cap is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FE_qliba2(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
df_all=FEsingle.PredictDaysTrend(df_all,5)
print(df_all)
df_all=df_all.loc[:,['ts_code','trade_date','tomorrow_chg','tomorrow_chg_rank']]
print(df_all.dtypes)
print(df_all)
#===================================================================================================================================#
#load qlib features
###df_qlib_1=pd.read_csv('zzztest.csv',header=0)
###df_qlib_2=pd.read_csv('zzztest2.csv',header=0)
##df_qlib_1=pd.read_csv('2013.csv',header=0)
###df_qlib_1=df_qlib_1.iloc[:,0:70]
##df_qlib_all_l=df_qlib_1.iloc[:,0:2]
##df_qlib_all_r=df_qlib_1.iloc[:,70:]
##df_qlib_1 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##print(df_qlib_1.head(10))
##df_qlib_2=pd.read_csv('2015.csv',header=0)
##df_qlib_all_l=df_qlib_2.iloc[:,0:2]
##df_qlib_all_r=df_qlib_2.iloc[:,70:]
##df_qlib_2 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_3=pd.read_csv('2017.csv',header=0)
##df_qlib_all_l=df_qlib_3.iloc[:,0:2]
##df_qlib_all_r=df_qlib_3.iloc[:,70:]
##df_qlib_3 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_4=pd.read_csv('2019.csv',header=0)
##df_qlib_all_l=df_qlib_4.iloc[:,0:2]
##df_qlib_all_r=df_qlib_4.iloc[:,70:]
##df_qlib_4 = pd.concat([df_qlib_all_l,df_qlib_all_r],axis=1)
##df_qlib_all=pd.concat([df_qlib_2,df_qlib_1])
##df_qlib_all=pd.concat([df_qlib_3,df_qlib_all])
##df_qlib_all=pd.concat([df_qlib_4,df_qlib_all])
##df_qlib_all.drop_duplicates()
##print(df_qlib_all.head(10))
##df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
##df_qlib_all.to_csv("13to21_first70plus.csv")
df_qlib_all=pd.read_csv('13to21_first70plus.csv',header=0)
#df_qlib_all.drop(['LABEL0'],axis=1,inplace=True)
print(df_qlib_all)
df_qlib_all.rename(columns={'datetime':'trade_date','instrument':'ts_code','score':'mix'}, inplace = True)
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all['trade_date'] = pd.to_datetime(df_qlib_all['trade_date'], format='%Y-%m-%d')
df_qlib_all['trade_date']=df_qlib_all['trade_date'].apply(lambda x: x.strftime('%Y%m%d'))
df_qlib_all['trade_date'] = df_qlib_all['trade_date'].astype(int)
df_qlib_all['ts_codeL'] = df_qlib_all['ts_code'].str[:2]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_code'].str[2:]
df_qlib_all['ts_codeR'] = df_qlib_all['ts_codeR'].apply(lambda s: s+'.')
df_qlib_all['ts_code']=df_qlib_all['ts_codeR'].str.cat(df_qlib_all['ts_codeL'])
df_qlib_all.drop(['ts_codeL','ts_codeR'],axis=1,inplace=True)
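#reformat qlib instrument codes (assumed to look like 'SH600000') into tushare-style
#'600000.SH' by splitting off the 2-letter exchange prefix and re-attaching it after a dot.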
print(df_qlib_all.dtypes)
print(df_qlib_all)
df_qlib_all=df_qlib_all.fillna(value=0)
df_all=pd.merge(df_all, df_qlib_all, how='left', on=['ts_code','trade_date'])
print(df_all)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#question mark here -- revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required when using string methods
print(df_all)
##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market cap is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEonlinew_a31(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
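#down_limit/up_limit is roughly 0.82 for 10%-limit stocks and 0.90 for 5%-limit (ST) stocks,
#so the 0.58-0.85 band above appears to select non-ST tickers; the later
#st_or_otherwrong==1 filter keeps only those.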
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###actual price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=FEsingle.InputChgSum(df_all,5,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,5,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,5,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,12,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,12,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,12,'net_mf_amount')
df_all=FEsingle.InputChgSum(df_all,25,'sm_amount')
df_all=FEsingle.InputChgSum(df_all,25,'lg_amount')
df_all=FEsingle.InputChgSum(df_all,25,'net_mf_amount')
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
#question mark here -- revisit this
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #the .str accessor is required when using string methods
print(df_all)
##exclude the STAR Market (688 codes)
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#adjusted price
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#drop stocks whose market cap is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
df_all=df_all[df_all['total_mv_rank']<6]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1
class FEfast_a23(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol','buy_md_vol','sell_md_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
#print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#ST or otherwise abnormal flag
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##exclude the STAR Market (688 codes)
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#df_all['ts_code_try']=df_all['ts_code'].map(lambda x : x[:-3])
#===================================================================================================================================#
#adjusted price
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
#df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
#df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
#df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#hit price limit or not
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###actual price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,3)
#df_all=FEsingle.PctChgSumRank(df_all,6)
#df_all=FEsingle.PctChgSumRank(df_all,12)
#df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
#df_all=FEsingle.PctChgSum(df_all,6)
#df_all=FEsingle.PctChgSum(df_all,12)
#df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,24)
#compute ranks for the three price ratios
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*19.9//2
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
# Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
# Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
# Planned version of the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
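# rollingRankSciPyB is a project-specific helper (not shown here); it is assumed to
# return the rank of the last value within the 20-day window. A rough equivalent
# using scipy, for illustration only:
#   from scipy.stats import rankdata
#   def rolling_rank_last(window):
#       return rankdata(window)[-1]
#   df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(rolling_rank_last)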
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
from urllib.request import urlopen
from dateutil.parser import parse
import os
from urllib.error import HTTPError
import pandas as pd
import xarray as xr
import glob
from datetime import timedelta
def _create_unid(x, haz_type):
r"""Creates a unique id for each svrgis report.
The unid format is as follows:
YYYYMMDDHHmmz000000000_HAZ
Where the date information is from the UTC time of the report, the
numbers are a zero-padded 9 digit number corresponding to the 'om'
field, and HAZ is the given hazard type ('tor', 'hail', 'wind').
Parameters
----------
x: Series
A single row from a pandas DataFrame
haz_type: str
Hazard identifier to add to unid.
Returns
-------
unid: str
A unique id based on the information in a given pandas DataFrame row.
"""
unid = "{}{:02d}{:02d}{:02d}{:02d}z{:09d}_{}"
unid = unid.format(x['date_utc'].year, x['date_utc'].month,
x['date_utc'].day, x['date_utc'].hour,
x['date_utc'].minute, x['om'], haz_type)
return unid
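# Example (hypothetical report): a tornado report with om=12345 observed at
# 2011-04-27 20:30 UTC produces the unid "201104272030z000012345_tor".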
def _create_dtime(x):
r"""Generates datetimes from given DataFrame row columns date
and time. Always adds 6 hours to the expected CST time to convert
to UTC.
Parameters
----------
x: Series
A single row from a pandas DataFrame.
Returns
-------
dtime: datetime
The report datetime converted from CST to UTC (CST + 6 hours).
"""
dstr = "{}-{}".format(x['date'], x['time'])
dtime = parse(dstr)
dtime += timedelta(hours=6)
return dtime
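# Example: a row with date='2011-04-27' and time='14:30:00' (CST) parses to
# 2011-04-27 14:30 and is shifted forward 6 hours to 2011-04-27 20:30 UTC.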
def _preprocess_svrgis_table(csv_in):
r"""Read in a csv in "svrgis" format and modify it to be compatible with
the expected SVRIMG format. The original CST date and time is preserved in the
'CST_date' and 'CST_time' columns.
The SVRIMG format expects UTC time and the following updated columns:
uid, date_utc, yr, mo, dy, hr
See also: _create_dtime, _create_unid
Parameters
----------
csv_in: pandas DataFrame
DataFrame of svrgis csv that will be modified into the SVRIMG
expected format.
Returns
-------
csv_in: DataFrame
A pandas DataFrame containing the formatted svrgis data.
"""
csv_in['CST_date'] = csv_in['date']
csv_in['CST_time'] = csv_in['time']
csv_in['date_utc'] = csv_in.apply(lambda x: _create_dtime(x), axis=1)
csv_in['yr'] = csv_in['date_utc'].dt.year
csv_in['mo'] = csv_in['date_utc'].dt.month
csv_in['dy'] = csv_in['date_utc'].dt.day
csv_in['hr'] = csv_in['date_utc'].dt.hour
return csv_in
def _create_svrgis_table(in_name, out_name, haz_type, data_dir="../data/csvs",
start_year=1996, end_year=2017):
r"""Opens a given svrgis table from data_dir + in_name and returns a pandas
DataFrame. If the table is already created, nothing will happen. Otherwise,
it saves data_dir + out_name. This function assumes that 'data_dir' exists.
NOTE: All report times are incremented by 6 hours to convert them to UTC,
because SPC stores the dates as central standard time (CST) for every
day of the year.
Parameters
----------
in_name: str
Name of original svrgis csv file that you downloaded from SPC.
out_name: str
Name of output csv file.
haz_type: str
Optionally add this string to the end of the unid.
data_dir: str
Location where the original svrgis csv file is and where the
new file will be saved. Default is "../data/csvs".
start_year: int
First year from which to return data. Default is 1996.
end_year: int
Last year from which to return data. Default is 2017.
Returns
-------
td: DataFrame
A pandas DataFrame containing the formatted svrgis data.
"""
out_filename = "{}/{}".format(data_dir, out_name)
in_filename = "{}/{}".format(data_dir, in_name)
if os.path.exists(out_filename):
print("File exists!", out_filename)
else:
td = pd.read_csv(in_filename)
td = _preprocess_svrgis_table(td)
td = td[(td.yr >= start_year) & (td.yr <= end_year)]
td['uid'] = td.apply(lambda x: _create_unid(x, haz_type), axis=1)
td = td.set_index('uid')
td.to_csv(out_filename)
return td
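# Example call (file names are hypothetical):
#   tors = _create_svrgis_table("1950-2017_actual_tornadoes.csv",
#                               "96-17_tor_utc_svrgis.csv", "tor",
#                               data_dir="../data/csvs")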
def _create_index_table(out_name, haz_type, data_dir="../data/csvs",
url="http://svrimg.niu.edu/data/raw_img/", start_year=1996,
end_year=2017):
r"""Attempts to download and concatenate monthly tables from svrimg for
a given hazard type. If the output file does not exist, the combined result is
saved as out_name in data_dir. Otherwise, the existing file is read and returned.
Parameters
----------
out_name: str
Name of output csv file.
haz_type: str
Optionally add this string to the end of the unid.
data_dir: str
Location where the original svrgis csv file is and where the
new file will be saved. Default is "../data/csvs"
url: str
Base url directory where the table data is located.
Default is "http://svrimg.niu.edu/data/".
start_year: int
First year from which to return data. Default is 1996.
end_year: int
Last year from which to return data. Default is 2017.
Returns
-------
td: DataFrame
A pandas DataFrame containing the formatted svrimg index data.
"""
out_filename = "{}/{}".format(data_dir, out_name)
if os.path.exists(out_filename):
return pd.read_csv(out_filename, index_col='unid')
else:
csvs = []
for year in range(start_year, end_year+1):
for month in range(1, 13):
csv_name = "report_box_indexer_{:02d}.csv".format(month)
file_url = "{}/{}/{}/{}".format(url, haz_type,
year, csv_name)
try:
tmp_csv = pd.read_csv(file_url, index_col='unid')
csvs.append(tmp_csv)
except HTTPError as e:
print(e, file_url)
csvs = pd.concat(csvs)
csvs.to_csv(out_filename)
return csvs
def get_table(which, haz_type, data_dir="../data/csvs",
url="http://svrimg.niu.edu/data/"):
r"""Downloads svrimg index or svrgis report table from the given url
and returns a pandas DataFrame. If the table is already downloaded,
it simply returns a pandas DataFrame. This assumes that 'data_dir'
exists.
Parameters
----------
which: str
Either 'svrimg' for image indexes or 'svrgis' for report attributes.
haz_type: str
Identify what hazard key to request. Expecting 'tor', 'hail',
or 'wind'.
data_dir: str
Base directory in which to save the csv file. Default is
"../data/csv/".
url: str
Base url directory where the table data is located.
Default is "http://svrimg.niu.edu/data/".
Returns
-------
table_data: DataFrame
A pandas DataFrame containing svrimg index information.
"""
if which == 'svrimg':
csv_name = "96-17_{}_utc_svrimg_index.csv".format(haz_type)
id_col = 'unid'
elif which == 'svrgis':
csv_name = "96-17_{}_utc_gridrad.csv".format(haz_type)
id_col = 'uid'
else:
raise ValueError("Expected 'svrimg' or 'svrgis', not {}.".format(which))
file_url = "{}/{}".format(url, csv_name)
file_name = "{}/{}".format(data_dir, csv_name)
if not os.path.exists(file_name):
tmp_csv = pd.read_csv(file_url, index_col=id_col)
tmp_csv.to_csv(file_name)
return tmp_csv
else:
return pd.read_csv(file_name, index_col=id_col)
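# Example usage: fetch (or load the cached copy of) the tornado tables.
#   tor_reports = get_table("svrgis", "tor", data_dir="../data/csvs")
#   tor_index = get_table("svrimg", "tor", data_dir="../data/csvs")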
def get_pred_tables(data_dir, url="http://svrimg.niu.edu/data/", example=True,
default_name="*_table_*.csv", csv_name="eg_classes_96-17",
remove_first_row=False):
r"""Either downloads example predictions if 'example' is true, or combines your prediction
tables in 'data_dir' into one table using the default naming format of
'*_table_*.csv' or whatever is passed into default_name. This will
attempt to grab every year from 1996 - 2017, but will not fail if a year is missing.
By default, the first row in every year's table is example data from svrimg.org;
it is removed when 'remove_first_row' is True. If a UNID is repeated, the last
occurrence is kept, on the assumption that a later re-classification corrects an
earlier accidental click and is therefore more reliable.
Parameters
----------
data_dir: str
Base directory in which to save the csv file.
url: str
Base url directory where the table data is located.
Default is "http://svrimg.niu.edu/data/".
example: bool
If True, download example data. If False, look for local
yearly tables. Default is True.
default_name: str
Naming format for local csv files. Stars are used as wildcards.
Default is '*_table_*.csv'.
csv_name: str
Default name of new csv file containing classifications.
remove_first_row: bool
If True, remove the first row from each year's local table data; if False,
keep it. Default is False.
Returns
-------
csv: DataFrame
A pandas DataFrame of UNIDs and their predictions.
"""
if example:
if not os.path.exists("{}/{}.csv".format(data_dir, csv_name)):
_url = url + "sample_classifications_96-17.csv"
c = pd.read_csv(_url, index_col='UNID')
c = c.sort_index()
c.to_csv("{}/{}.csv".format(data_dir, csv_name))
else:
csvs = []
for fname in glob.glob(data_dir + default_name):
print("Reading", fname)
a = pd.read_csv(fname)
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.tests.extension.base import BaseOpsUtil
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** NA is 1 (a known value), so positions where the base is 1 must be unmasked
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
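# Both branches rely on the rule that 1 ** x is 1 even when x is pd.NA, so positions
# whose base equals 1 produce a known value and must not remain masked.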
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
import unittest
import pdb
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_index_equal
from ..models.condition_models import RuleKPI, RuleCondition, RuleConditionalOperator, RuleConditionGroup, RuleConditionGroupOperator
class Test_conditional_operator(unittest.TestCase):
def setUp(self):
"""
Create sample data
"""
d = {
"datum": pd.Series([3., 2., 1., np.nan]),
"criterion": pd.Series([np.nan, 1., 2., 3.]),
}
self.df = pd.DataFrame(d)
def test_greater(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.greater
index = operator.selectedIndex(self.df, "criterion", 2.)
self.assertEqual(index, pd.Int64Index([3]))
def test_less(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.less
index = operator.selectedIndex(self.df, "criterion", 2.)
self.assertEqual(index, pd.Int64Index([1]))
def test_greater_than_or_equal(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.greaterThanOrEqual
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([2, 3]))
def test_less_than_or_equal(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.lessThanOrEqual
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([1, 2]))
def test_equal(self):
"""
Test filtering by greater than
"""
operator = RuleConditionalOperator.equal
index = operator.selectedIndex(self.df, "criterion", 2.)
assert_index_equal(index, pd.Int64Index([2]))
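# Taken together, these tests pin down the expected contract of selectedIndex: for the
# sample frame above (criterion = [NaN, 1., 2., 3.]), "greater than 2" selects row 3,
# "less than 2" selects row 1, and the NaN in row 0 is never selected by any operator.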
class Test_condition(unittest.TestCase):
"""
Test module for the search ads condition classes
"""
def test_spend(self):
"""
Test filtering by total spend
"""
d = {
"keywordId": pd.Series([1, 2, 1]),
"localSpend": pd.Series([1., 3., 3.]),
}
df = pd.DataFrame(d)
condition = RuleCondition(kpi=RuleKPI("totalSpend"),
operator=RuleConditionalOperator("greater"),
comparisonValue=3.)
index = condition.selectedIndex(df, groupByID="keywordId")
assert_index_equal(index, pd.Int64Index([0, 2]))
dataIndex = [0, 1, 2]
d = {
"keywordId": pd.Series([1, 2, 1], index=dataIndex),
"localSpend": pd.Series([1., 3., 3.], index=dataIndex),
"totalSpend": pd.Series([4., 3., 4.], index=dataIndex),
}
assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1))
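# Worked expectation: keyword 1 appears in rows 0 and 2 with spends 1 + 3 = 4, keyword 2
# spends 3, so the "totalSpend > 3" condition selects only keyword 1's rows (index 0 and 2),
# and the grouped total is written back onto every row as the new 'totalSpend' column.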
def test_cpt(self):
"""
Test filtering by total CPT
"""
d = {
"keywordId": pd.Series([1, 2, 1]),
"localSpend": pd.Series([1., 3., 3.]),
"taps": pd.Series([0, 0, 2.]),
}
df = pd.DataFrame(d)
condition = RuleCondition(kpi=RuleKPI("reavgCPT"),
operator=RuleConditionalOperator("less"),
comparisonValue=3.)
index = condition.selectedIndex(df, groupByID="keywordId")
assert_index_equal(index, pd.Int64Index([0, 2]))
dataIndex = [0, 1, 2]
d = {
"keywordId": pd.Series([1, 2, 1], index=dataIndex),
"localSpend": pd.Series([1., 3., 3.], index=dataIndex),
"taps": pd.Series([0, 0, 2.], index=dataIndex),
"totalSpend": pd.Series([4., 3., 4.], index=dataIndex),
"reavgCPT": pd.Series([2., np.nan, 2.], index=dataIndex),
"totalTaps": pd.Series([2., 0, 2.], index=dataIndex),
}
assert_frame_equal(df.sort_index(axis=1), pd.DataFrame(d).sort_index(axis=1))
def test_cpa(self):
"""
Test filtering by total CPA
"""
d = {
"keywordId": pd.Series([1, 2, 1]),
"localSpend": pd.Series([1., 3., 3.]),
"installs": pd.Series([0, 1., 2.]),
}
df = pd.DataFrame(d)
condition = RuleCondition(kpi=RuleKPI("reavgCPA"),
operator=RuleConditionalOperator("less"),
comparisonValue=3.)
index = condition.selectedIndex(df, groupByID="keywordId")
assert_index_equal(index, pd.Int64Index([0, 2]))
dataIndex = [0, 1, 2]
d = {
"keywordId": pd.Series([1, 2, 1], index=dataIndex),
"localSpend": pd.Series([1., 3., 3.], index=dataIndex),
"installs": pd.Series([0, 1., 2.], index=dataIndex),
"totalSpend": | pd.Series([4., 3., 4.], index=dataIndex) | pandas.Series |