prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
# %%
import sys
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams.update({'font.size': 6})
rm = pymaid.CatmaidInstance(url, token, name, password)
mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg.calculate_degrees(inplace=True)
adj = mg.adj # adjacency matrix from the "mg" object
'''
# repeat for other connection types
mg_aa = load_metagraph("Gaa", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg_aa.calculate_degrees(inplace=True)
adj_aa = mg_aa.adj
mg_dd = load_metagraph("Gdd", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg_dd.calculate_degrees(inplace=True)
adj_dd = mg_dd.adj
mg_da = load_metagraph("Gda", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg_da.calculate_degrees(inplace=True)
adj_da = mg_da.adj
'''
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
lvl7 = clusters.groupby('lvl7_labels')
# separate metadata file with median_node_visits from sensory neurons for each node,
# determined using iterative random walks
meta_with_order = pd.read_csv('data/meta_data_w_order.csv', index_col = 0, header = 0)
order_df = []
for key in lvl7.groups:
skids = lvl7.groups[key]
node_visits = meta_with_order.loc[skids, :].median_node_visits
order_df.append([key, np.nanmean(node_visits)])
order_df = pd.DataFrame(order_df, columns = ['cluster', 'node_visit_order'])
order_df = order_df.sort_values(by = 'node_visit_order')
order = list(order_df.cluster)
# %%
# pull sensory annotations and then pull associated skids
input_names = pymaid.get_annotated('mw brain inputs').name
input_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain inputs').name))
input_skids = [val for sublist in input_skids_list for val in sublist]
output_order = [1, 0, 2]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
output_names_reordered = [output_names[i] for i in output_order]
output_skids_list_reordered = [output_skids_list[i] for i in output_order]
# level 7 clusters
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
lvl7 = clusters.groupby('lvl7_labels')
meta_with_order = pd.read_csv('data/meta_data_w_order.csv', index_col = 0, header = 0)
# ordering by mean node visit from sensory
order_df = []
for key in lvl7.groups:
skids = lvl7.groups[key]
node_visits = meta_with_order.loc[skids, :].median_node_visits
order_df.append([key, np.nanmean(node_visits)])
order_df = pd.DataFrame(order_df, columns = ['cluster', 'node_visit_order'])
order_df = order_df.sort_values(by = 'node_visit_order')
order = list(order_df.cluster)
# getting skids of each cluster
cluster_lvl7 = []
for key in order:
cluster_lvl7.append(lvl7.groups[key].values)
# %%
## cascades from each cluster, ending at brain inputs/outputs
# maybe should switch to sensory second-order?
def skid_to_index(skid, mg):
index_match = np.where(mg.meta.index == skid)[0]
if(len(index_match)==1):
return(index_match[0])
if(len(index_match)!=1):
print('Not one match for skid %i!' %skid)
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
# order skids within groups and convert to indices
cluster_lvl7_indices_list = []
sorted_skids = []
for skids in cluster_lvl7:
skids_median_visit = meta_with_order.loc[skids, 'median_node_visits']
skids_sorted = skids_median_visit.sort_values().index
indices = []
for skid in skids_sorted:
index = skid_to_index(skid, mg)
indices.append(index)
cluster_lvl7_indices_list.append(indices)
sorted_skids.append(skids_sorted)
# delist
sorted_skids = [val for sublist in sorted_skids for val in sublist]
sorted_indices = []
for skid in sorted_skids:
sorted_indices.append(skid_to_index(skid, mg))
p = 0.05
max_hops = 5
n_init = 100
simultaneous = True
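# Cascade parameters used below: p is passed to to_transmission_matrix to set the
# transmission probabilities, max_hops caps how deep each cascade can travel, and
# n_init is the number of independent cascade repetitions per start set.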
transition_probs = to_transmission_matrix(adj, p)
cdispatch = TraverseDispatcher(
Cascade,
transition_probs,
stop_nodes = [],
max_hops=max_hops,
allow_loops = False,
n_init=n_init,
simultaneous=simultaneous,
)
# run cascades parallel from each individual node
from joblib import Parallel, delayed
from tqdm import tqdm
#neuron_indices_list = [val for sublist in cluster_lvl7_indices_list for val in sublist]
def run_cascades_from_node(i, cdispatch):
return(cdispatch.multistart(start_nodes = i))
cluster_hit_hist_list = []
for indices in tqdm(cluster_lvl7_indices_list):
hit_hist_list = Parallel(n_jobs=-1)(delayed(run_cascades_from_node)(i, cdispatch) for i in indices)
cluster_hit_hist_list.append(hit_hist_list)
# %%
# plot in feedback/feedforward matrix
# delist
neuron_hit_hist_list = [val for sublist in cluster_hit_hist_list for val in sublist]
# sort matrices correctly and sum
neuron_hit_hist_hop_summed = []
for hit_hist in neuron_hit_hist_list:
hop_summed = hit_hist[sorted_indices, 0:4].sum(axis = 1)
neuron_hit_hist_hop_summed.append(hop_summed)
neuron_hit_hist_hop_summed = pd.DataFrame(neuron_hit_hist_hop_summed).T
neuron_hit_hist_hop_summed.to_csv('cascades/feedback_through_brain/cascades_from_single_neurons.csv')
#neuron_hit_hist_hop_summed = pd.read_csv('cascades/feedback_through_brain/cascades_from_single_neurons.csv')
import cmasher as cmr
plt.imshow(neuron_hit_hist_hop_summed, cmap=cmr.ember, interpolation='none')
plt.savefig('cascades/feedback_through_brain/plots/feedback_vs_feedforward_neurons_4hops_ad.pdf', bbox_inches='tight')
# %%
# feedback character of individual neurons (output)
feedback_mat_ad = neuron_hit_hist_hop_summed
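# For each neuron column i (columns ordered by median node-visit rank), signal
# landing on earlier-ranked rows (0..i-1) counts as feedback and signal landing
# on later-ranked rows (i+1..end) counts as feedforward; p_ff and p_fb are the
# corresponding fractions of that total.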
ff_fb_character_ad_output = []
for i in range(len(feedback_mat_ad.columns)):
cols = feedback_mat_ad.columns
column = feedback_mat_ad.loc[:, cols[i]]
fb = sum(column[0:i])
ff = sum(column[(i+1):len(column)])
if((ff>0) | (fb>0)):
ff_fb_character_ad_output.append([column.name, ff, fb, ff/(ff+fb), fb/(ff+fb)])
if((ff==0) & (fb==0)):
ff_fb_character_ad_output.append([column.name, 0, 0, 0, 0])
ff_fb_character_ad_output = | pd.DataFrame(ff_fb_character_ad_output, columns = ['neuron', 'feedforward', 'feedback', 'p_ff', 'p_fb']) | pandas.DataFrame |
'''
@ Author : <NAME>
@ E-mail : <EMAIL>
@ Github : https://github.com/WooilJeong/PublicDataReader
@ Blog : https://wooiljeong.github.io
'''
import pandas as pd
import numpy as np
import datetime
import requests
from bs4 import BeautifulSoup
from PublicDataReader.PublicDataPortal.__init__ import *
class AptTradeReader(Common):
def __init__(self, serviceKey):
super().__init__(serviceKey)
# Validate the service key
api_url = "http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey=" + self.serviceKey
super().test(api_url)
def CodeFinder(self, name):
'''
The MOLIT (Ministry of Land, Infrastructure and Transport) real-transaction-price open API identifies districts by the first five digits of the ten-digit legal-dong code.
This method looks up the district code to use with the API: it takes a district name string and returns the matching rows as a pandas DataFrame.
'''
result = self.code[self.code['법정동명'].str.contains(name)][['법정동명','법정구코드']]
result.index = range(len(result))
return result
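# Hypothetical usage sketch (the district name is only an example):
# >>> reader = AptTradeReader(serviceKey)
# >>> reader.CodeFinder("종로구")  # -> DataFrame of matching district names and 5-digit codes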
def DataReader(self, LAWD_CD, DEAL_YMD):
'''
Takes a district code (LAWD_CD) and a contract month (DEAL_YMD) and returns apartment transaction records as a pandas DataFrame.
'''
# URL
url_1="http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?LAWD_CD="+LAWD_CD
url_2="&DEAL_YMD=" + DEAL_YMD
url_3="&serviceKey=" + self.serviceKey
url_4="&numOfRows=99999"
url = url_1+url_2+url_3+url_4
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, 'lxml-xml')
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = ['법정동','지역코드','아파트','지번','년','월','일','건축년도','전용면적','층','거래금액']
for t in te:
for variable in variables:
try :
globals()[variable] = t.find(variable).text
except :
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동,지역코드,아파트,지번,년,월,일,건축년도,전용면적,층,거래금액]],
columns = variables
)
df = pd.concat([df, data])
# Set Columns
colNames = ['지역코드','법정동','거래일','아파트','지번','전용면적','층','건축년도','거래금액']
# Feature Engineering
try:
if len(df['년']) != 0 and len(df['월']) != 0 and len(df['일']) != 0:
df['거래일'] = df['년'] + '-' + df['월'] + '-' + df['일']
df['거래일'] = pd.to_datetime(df['거래일'])
df['거래금액'] = pd.to_numeric(df['거래금액'].str.replace(',',''))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arange Columns
df = df[colNames]
df = df.sort_values(['법정동','거래일'])
df['법정동'] = df['법정동'].str.strip()
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, 'lxml-xml')
# Filtering
te = xmlsoup.findAll("header")
# If the request itself succeeded (resultCode "00") but we still hit this branch, it is an error in this Python code
if te[0].find('resultCode').text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Otherwise the error came from the open API service provider
else:
print(">>> Open API Error: {}".format(te[0].find['resultMsg']))
def DataCollector(self, LAWD_CD, start_date, end_date):
'''
Collects data for the given period, from start_date to end_date (both "YYYY-MM").
'''
end_date = datetime.datetime.strptime(end_date, "%Y-%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = | pd.date_range(start=start_date, end=end_date, freq='m') | pandas.date_range |
"""
>>> from blaze.expr import Symbol
>>> from blaze.compute.pandas import compute
>>> accounts = Symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from pandas import DataFrame
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> df = DataFrame(data, columns=['name', 'amount'])
>>> compute(deadbeats, df)
1 Bob
2 Charlie
Name: name, dtype: object
"""
from __future__ import absolute_import, division, print_function
import pandas as pd
from pandas.core.generic import NDFrame
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
import numpy as np
from collections import defaultdict
from toolz import merge as merge_dicts
import fnmatch
from datashape.predicates import isscalar
from ..api.into import into
from ..dispatch import dispatch
from ..expr import (Projection, Field, Sort, Head, Broadcast, Selection,
Reduction, Distinct, Join, By, Summary, Label, ReLabel,
Map, Apply, Merge, Union, std, var, Like, Slice,
ElemWise, DateTime, Millisecond, Expr, Symbol)
from ..expr import UnaryOp, BinOp
from ..expr import Symbol, common_subexpression
from .core import compute, compute_up, base
from ..compatibility import _inttypes
__all__ = []
@dispatch(Projection, DataFrame)
def compute_up(t, df, **kwargs):
return df[list(t.fields)]
@dispatch(Field, (DataFrame, DataFrameGroupBy))
def compute_up(t, df, **kwargs):
return df[t.fields[0]]
@dispatch(Broadcast, DataFrame)
def compute_up(t, df, **kwargs):
d = dict((t._child[c]._expr, df[c]) for c in t._child.fields)
return compute(t._expr, d)
@dispatch(Broadcast, Series)
def compute_up(t, s, **kwargs):
return compute_up(t, s.to_frame(), **kwargs)
@dispatch(BinOp, Series, (Series, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, (Series, base), Series)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, NDFrame)
def compute_up(t, df, **kwargs):
f = getattr(t, 'op', getattr(np, t.symbol, None))
if f is None:
raise ValueError('%s is not a valid operation on %s objects' %
(t.symbol, type(df).__name__))
return f(df)
@dispatch(Selection, (Series, DataFrame))
def compute_up(t, df, **kwargs):
predicate = compute(t.predicate, {t._child: df})
return df[predicate]
@dispatch(Symbol, DataFrame)
def compute_up(t, df, **kwargs):
if not list(t.fields) == list(df.names):
# TODO also check dtype
raise ValueError("Schema mismatch: \n\nTable:\n%s\n\nDataFrame:\n%s"
% (t, df))
return df
@dispatch(Join, DataFrame, DataFrame)
def compute_up(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(lhs, rhs,
left_on=t.on_left, right_on=t.on_right,
how=t.how)
return result.reset_index()[t.fields]
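# Illustrative sketch with hypothetical data, mirroring the pd.merge call above:
# >>> lhs = DataFrame({'id': [1, 2], 'name': ['Alice', 'Bob']})
# >>> rhs = DataFrame({'id': [1, 2], 'amount': [100, 200]})
# >>> pd.merge(lhs, rhs, left_on='id', right_on='id', how='inner')
# yields one row per matching id carrying the columns of both frames.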
@dispatch(Symbol, (DataFrameGroupBy, SeriesGroupBy))
def compute_up(t, gb, **kwargs):
return gb
def post_reduction(result):
# pandas may return an int, numpy scalar or non scalar here so we need to
# program defensively so that things are JSON serializable
try:
return result.item()
except (AttributeError, ValueError):
return result
@dispatch(Reduction, (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = post_reduction(getattr(s, t.symbol)())
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch((std, var), (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = post_reduction(getattr(s, t.symbol)(ddof=t.unbiased))
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch(Distinct, DataFrame)
def compute_up(t, df, **kwargs):
return df.drop_duplicates()
@dispatch(Distinct, Series)
def compute_up(t, s, **kwargs):
s2 = Series(s.unique())
s2.name = s.name
return s2
def unpack(seq):
""" Unpack sequence of length one
>>> unpack([1, 2, 3])
[1, 2, 3]
>>> unpack([1])
1
"""
seq = list(seq)
if len(seq) == 1:
seq = seq[0]
return seq
Grouper = ElemWise, Series, list
@dispatch(By, list, DataFrame)
def get_grouper(c, grouper, df):
return grouper
@dispatch(By, (ElemWise, Series), NDFrame)
def get_grouper(c, grouper, df):
return compute(grouper, {c._child: df})
@dispatch(By, (Field, Projection), NDFrame)
def get_grouper(c, grouper, df):
return grouper.fields
@dispatch(By, Reduction, Grouper, NDFrame)
def compute_by(t, r, g, df):
names = [r._name]
preapply = compute(r._child, {t._child: df})
# Pandas and Blaze column naming schemes differ
# Coerce DataFrame column names to match Blaze's names
preapply = preapply.copy()
if isinstance(preapply, Series):
preapply.name = names[0]
else:
preapply.names = names
group_df = concat_nodup(df, preapply)
gb = group_df.groupby(g)
groups = gb[names[0] if isscalar(t.apply._child.dshape.measure) else names]
return compute_up(r, groups) # do reduction
@dispatch(By, Summary, Grouper, NDFrame)
def compute_by(t, s, g, df):
names = s.fields
preapply = DataFrame(dict(zip(names,
(compute(v._child, {t._child: df})
for v in s.values))))
df2 = concat_nodup(df, preapply)
groups = df2.groupby(g)
d = defaultdict(list)
for name, v in zip(names, s.values):
d[name].append(getattr(Series, v.symbol))
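# Illustrative mapping (hypothetical expression): a summary such as
# summary(total=t.amount.sum(), n=t.id.count()) produces
# d == {'total': [Series.sum], 'n': [Series.count]}, which groups.agg consumes below.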
result = groups.agg(dict(d))
# Rearrange columns to match names order
result = result[sorted(result.columns, key=lambda t: names.index(t[0]))]
result.columns = t.apply.fields # flatten down multiindex
return result
@dispatch(Expr, DataFrame)
def post_compute_by(t, df):
return df.reset_index(drop=True)
@dispatch((Summary, Reduction), DataFrame)
def post_compute_by(t, df):
return df.reset_index()
@dispatch(By, NDFrame)
def compute_up(t, df, **kwargs):
grouper = get_grouper(t, t.grouper, df)
result = compute_by(t, t.apply, grouper, df)
return post_compute_by(t.apply, into(DataFrame, result))
def concat_nodup(a, b):
""" Concatenate two dataframes/series without duplicately named columns
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Charlie', 300]],
... columns=['id','name', 'amount'])
>>> concat_nodup(df, df)
id name amount
0 1 Alice 100
1 2 Bob -200
2 3 Charlie 300
>>> concat_nodup(df.name, df.amount)
name amount
0 Alice 100
1 Bob -200
2 Charlie 300
>>> concat_nodup(df, df.amount + df.id)
id name amount 0
0 1 Alice 100 101
1 2 Bob -200 -198
2 3 Charlie 300 303
"""
if isinstance(a, DataFrame) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],
axis=1)
if isinstance(a, DataFrame) and isinstance(b, Series):
if b.name not in a.columns:
return pd.concat([a, b], axis=1)
else:
return a
if isinstance(a, Series) and isinstance(b, DataFrame):
return | pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1) | pandas.concat |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
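# Note: this helper round-trips files written by to_csv in the tests below,
# defaulting to index_col=0 and parse_dates=True unless overridden via kwargs.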
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
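# Builds a datetime list of length n with nnat entries (default 10%) replaced
# by NaT, plus two additional NaT entries at mirrored random positions.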
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3 = make_dtnat_arr(chunksize + 5, 0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc. Monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
tm.assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(recons.values, datetime_frame.values)
# needed if setUp becomes class method
datetime_frame.index = old_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ["first", "second"]
return DataFrame(
np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[("bah", "foo"), ("bah", "bar"), ("ban", "baz")], names=names
),
dtype="int64",
)
# column & index are multi-index
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(df, result)
# column is mi
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=0)
tm.assert_frame_equal(df, result)
# dup column names?
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3], index_col=[0, 1, 2])
tm.assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
tm.assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert com.all_none(*result.columns.names)
result.columns.names = df.columns.names
tm.assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1], index_col=[0])
tm.assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = f"len of {i}, but only 5 lines in file"
with pytest.raises(ParserError, match=msg):
read_csv(path, header=list(range(i)), index_col=0)
# write with cols
msg = "cannot specify cols with a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.to_csv(path, columns=["foo", "bar"])
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_interval_index(self):
# GH 28210
df = DataFrame({"A": list("abc"), "B": range(3)}, index=pd.interval_range(0, 3))
with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
df.to_csv(path)
result = self.read_csv(path, index_col=0)
# can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
expected = df.copy()
expected.index = expected.index.astype(str)
tm.assert_frame_equal(result, expected)
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with tm.ensure_clean("__tmp_to_csv_float32_nanrep__.csv") as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(",")[2] == "999"
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({"A": [1, 2, 3], "B": ["5,6", "7,8", "9,0"]})
with tm.ensure_clean("__tmp_to_csv_withcommas__.csv") as path:
df.to_csv(path)
df2 = self.read_csv(path)
tm.assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return [f"{name}{i:03d}" for i in range(5)]
df_float = DataFrame(
np.random.randn(100, 5), dtype="float64", columns=create_cols("float")
)
df_int = DataFrame(
np.random.randn(100, 5).astype("int64"),
dtype="int64",
columns=create_cols("int"),
)
df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool"))
df_object = DataFrame(
"foo", index=df_float.index, columns=create_cols("object")
)
df_dt = DataFrame(
Timestamp("20010101"), index=df_float.index, columns=create_cols("date")
)
# add in some nans
df_float.iloc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = {}
for n, dtype in [
("float", np.float64),
("int", np.int64),
("bool", np.bool_),
("object", object),
]:
for c in create_cols(n):
dtypes[c] = dtype
with tm.ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(
filename, index_col=0, dtype=dtypes, parse_dates=create_cols("date")
)
tm.assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(
np.random.randn(1000, 30),
columns=list(range(15)) + list(range(15)),
dtype="float64",
)
with tm.ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
tm.assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype="float64")
df_int = DataFrame(np.random.randn(1000, 3)).astype("int64")
df_bool = DataFrame(True, index=df_float.index, columns=range(3))
df_object = DataFrame("foo", index=df_float.index, columns=range(3))
df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3))
df = pd.concat(
[df_float, df_int, df_bool, df_object, df_dt], axis=1, ignore_index=True
)
df.columns = [0, 1, 2] * 5
with tm.ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ["0.4", "1.4", "2.4"]:
result[i] = to_datetime(result[i])
result.columns = df.columns
tm.assert_frame_equal(result, df)
# GH3457
N = 10
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
with tm.ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={"a.1": "a"})
tm.assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({"A": range(100000)})
aa["B"] = aa.A + 1.0
aa["C"] = aa.A + 2.0
aa["D"] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with tm.ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
tm.assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with tm.ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
tm.assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO("a,1.0\nb,2.0")
df = self.read_csv(f1, header=None)
newdf = DataFrame({"t": df[df.columns[0]]})
with tm.ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
tm.assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
with tm.ensure_clean() as path:
df.to_csv(path, encoding="UTF-8")
df2 = read_csv(path, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
df.to_csv(path, encoding="UTF-8", index=False)
df2 = read_csv(path, index_col=None, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO("")
df = DataFrame(
[["\u05d0", "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=["\u05d0", "\u05d1", "\u05d2", "\u05d3"],
index=["\u05d0", "\u05d1"],
)
df.to_csv(buf, encoding="UTF-8")
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding="UTF-8")
tm.assert_frame_equal(df, df2)
def test_to_csv_stringio(self, float_frame):
buf = StringIO()
float_frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
tm.assert_frame_equal(recons, float_frame)
def test_to_csv_float_format(self):
df = DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as filename:
df.to_csv(filename, float_format="%.2f")
rs = read_csv(filename, index_col=0)
xp = DataFrame(
[[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
index=["A", "B"],
columns=["X", "Y", "Z"],
)
tm.assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({"A": [1, 2, 3], "B": ["foo", "bar", "baz"]})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding="utf-8")
result = buf.getvalue()
expected_rows = ['"A","B"', '1,"foo"', '2,"bar"', '3,"baz"']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({"A": ["hello", '{"hello"}']})
for encoding in (None, "utf-8"):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE, encoding=encoding, index=False)
result = buf.getvalue()
expected_rows = ["A", "hello", '{"hello"}']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected_rows = ["A,B", "one,1,4", "two,2,5", "three,3,6"]
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
# see gh-20353
df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["one", "two", "three"])
with tm.ensure_clean() as path:
# case 1: CRLF as line terminator
df.to_csv(path, line_terminator="\r\n")
expected = b",A,B\r\none,1,4\r\ntwo,2,5\r\nthree,3,6\r\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 2: LF as line terminator
df.to_csv(path, line_terminator="\n")
expected = b",A,B\none,1,4\ntwo,2,5\nthree,3,6\n"
with open(path, mode="rb") as f:
assert f.read() == expected
with tm.ensure_clean() as path:
# case 3: The default line terminator(=os.linesep)(gh-21406)
df.to_csv(path)
os_linesep = os.linesep.encode("utf-8")
expected = (
b",A,B"
+ os_linesep
+ b"one,1,4"
+ os_linesep
+ b"two,2,5"
+ os_linesep
+ b"three,3,6"
+ os_linesep
)
with open(path, mode="rb") as f:
assert f.read() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output
# as when one would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
res = StringIO()
s.to_csv(res, header=False)
exp = StringIO()
s2.to_csv(exp, header=False)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self, float_frame):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = float_frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = read_csv(StringIO(csv_str), index_col=0)
tm.assert_frame_equal(float_frame, recons)
@pytest.mark.parametrize(
"df,encoding",
[
(
DataFrame(
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
index=["A", "B"],
columns=["X", "Y", "Z"],
),
None,
),
# GH 21241, 21118
(DataFrame([["abc", "def", "ghi"]], columns=["X", "Y", "Z"]), "ascii"),
(DataFrame(5 * [[123, "你好", "世界"]], columns=["X", "Y", "Z"]), "gb2312"),
(
DataFrame(5 * [[123, "Γειά σου", "Κόσμε"]], columns=["X", "Y", "Z"]),
"cp737",
),
],
)
def test_to_csv_compression(self, df, encoding, compression):
with tm.ensure_clean() as filename:
df.to_csv(filename, compression=compression, encoding=encoding)
# test the round trip - to_csv -> read_csv
result = read_csv(
filename, compression=compression, index_col=0, encoding=encoding
)
tm.assert_frame_equal(df, result)
# test the round trip using file handle - to_csv -> read_csv
with get_handle(
filename, "w", compression=compression, encoding=encoding
) as handles:
df.to_csv(handles.handle, encoding=encoding)
assert not handles.handle.closed
result = read_csv(
filename,
compression=compression,
encoding=encoding,
index_col=0,
).squeeze("columns")
tm.assert_frame_equal(df, result)
# explicitly make sure file is compressed
with tm.decompress_file(filename, compression) as fh:
text = fh.read().decode(encoding or "utf8")
for col in df.columns:
assert col in text
with tm.decompress_file(filename, compression) as fh:
tm.assert_frame_equal(df, read_csv(fh, index_col=0, encoding=encoding))
def test_to_csv_date_format(self, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_date_format__") as path:
dt_index = datetime_frame.index
datetime_frame = DataFrame(
{"A": dt_index, "B": dt_index.shift(1)}, index=dt_index
)
datetime_frame.to_csv(path, date_format="%Y%m%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime("%Y%m%d"))
)
tm.assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format="%Y-%m-%d")
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime("%Y-%m-%d")
)
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime("%Y-%m-%d")
)
tm.assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format="%Y%m%d")
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime("%Y%m%d"))
)
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = datetime_frame_columns.columns.map(
lambda x: x.strftime("%Y%m%d")
)
tm.assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
["NaT"] * 10 + ["2000-01-01", "1/1/2000", "1-1-2000"]
)
nat_frame = DataFrame({"A": nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format="%Y-%m-%d")
test = read_csv(path, parse_dates=[0, 1], index_col=0)
tm.assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with | tm.ensure_clean("csv_date_format_with_dst") | pandas._testing.ensure_clean |
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None
sp_dir = '/Users/rwang/RMI/Climate Action Engine - Documents/OCI Phase 2'
opem_dir = '/Users/rwang/Documents/OCI+/Downstream/opem'
print('Merging upstream and midstream results...')
# import sqlite3
# connection = sqlite3.connect("../OCI_Database.db")
upstream = pd.read_csv(sp_dir + '/Upstream/upstream_data_pipeline_sp/Postprocessed_Outputs_2/upstream_postprocessed_fix.csv')
midstream = pd.read_csv(sp_dir + '/Upstream/upstream_data_pipeline_sp/Postprocessed_Outputs_2/midstream_postprocessed.csv')
# Calculate Crude to Refinery in bbl/d from Energy Summary tab of OPGEE model. Formula is based on cell G6 in OPEM input tab
#https://rockmtnins.sharepoint.com/:x:/r/sites/CAE/_layouts/15/Doc.aspx?sourcedoc=%7B5E0994C9-8E35-440B-8BB4-31DF5167F60C%7D&file=OCI%20site%20input%20table%20sources.xlsx&action=default&mobileredirect=true&cid=544ad233-565f-438c-9ce2-d7d5b607b1da
# ('Energy Summary'!E89*1e3/'Energy Summary'!M133)/(158.9873*141.5/(131.5+Results!G34))
upstream['crude_to_refinery(bbl/d)']= (upstream['ES_Crude_output(mmbut/d)']*1e3/upstream['ES_Energy_Density_crude(mmbtu/t)'])/(158.9873*141.5/(131.5+upstream['API gravity']))
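# i.e. crude energy output divided by crude energy density gives a mass flow,
# which is then converted to barrels using the API-gravity-based specific
# gravity 141.5/(131.5+API) together with the 158.9873 litres-per-barrel factor.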
# For oil sands, the crude to refinery volume is bigger than the input oil production volume.
# It's interesting and worth exploring why. Is there a mistake in Raghav's mmbtu/d to boe/d calculation?
#upstream[['Field_name','year']][upstream['crude_to_refinery(bbl/d)']>upstream['Oil production volume']]
# Calculate NGL_C2 export from the field, formula based on cell G8 in OPEM input tab
# (Flowsheet!W17+Flowsheet!CP17)*1000/(5.61458350903291*20.98*2.2)
upstream['NGL_C2(boed)'] = (upstream['FS_LPG_export_C2(t/d)']+upstream['FS_Ethane_to_Petchem(t/d)'])*1000/(5.61458350903291*20.98*2.2)
# Calculate NGL_C3 export from the field, formula based on cell G9 in OPEM input tab
# Flowsheet!W18*1000000/(42*1920)+Flowsheet!W9*(1000000)*0.75/(1923*42) assuming 75% of LPG exported is C3
upstream['NGL_C3(boed)'] = upstream['FS_LPG_export_C3(t/d)']*1000000/(42*1920)+upstream['FS_LPG_export_LPG(t/d)']*(1000000)*0.75/(1923*42)
# Calculate NGL_C4 export from the field, formula based on cell G10 in OPEM input tab
# Flowsheet!W19*1000000/(42*2213)+Flowsheet!W9*(1000000)*0.25/(1923*42) assuming 25% of LPG exported is C4
upstream['NGL_C4(boed)'] = upstream['FS_LPG_export_C4(t/d)']*1000000/(42*2213)+upstream['FS_LPG_export_LPG(t/d)']*(1000000)*0.25/(1923*42)
# Calculate NGL_C5+ export from the field, formula based on cell G11 in OPEM input tab
# Assuming all NGL minus C2, C3 and C4 is C5+
# ('Energy Summary'!E87-(((Flowsheet!W17+Flowsheet!CP17)*'Energy Summary'!M141) +
# (Flowsheet!W18*'Energy Summary'!M142) + (Flowsheet!W19*'Energy Summary'!M143) +
#(Flowsheet!W9*84950/1923)))/(42*.110)
upstream['NGL_C5(boed)'] = (upstream['ES_NGL_output(mmbtu/d)']-(((upstream['FS_LPG_export_C2(t/d)']+ \
upstream['FS_Ethane_to_Petchem(t/d)'])*upstream['ES_Energy_Density_C2(mmbtu/t)']) + \
(upstream['FS_LPG_export_C3(t/d)']*upstream['ES_Energy_Density_C3(mmbtu/t)']) + \
(upstream['FS_LPG_export_C4(t/d)']*upstream['ES_Energy_Density_C4(mmbtu/t)']) + \
(upstream['FS_LPG_export_LPG(t/d)']*84950/1923)))/(42*.110)
# Calculate petcoke from the field, formula based on cell G12 in OPEM input tab
# 1000*(Flowsheet!HG7-'Energy Summary'!E77/'Energy Summary'!M135)
upstream['petcoke(kg/d)'] = 1000*(upstream['FS_Petcoke_to_stock(t/d)']-upstream['ES_Petcoke_fuel(mmbtu/d)']/upstream['ES_Energy_Density_petcoke(mmbtu/t)'])
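# i.e. net exported petcoke = coke sent to stock minus coke burned as fuel on
# site (fuel use converted from mmbtu/d to t/d via its energy density), scaled
# from t/d to kg/d.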
upstream_midstream = upstream.merge(midstream,right_on=
['Field name','Field location (Country)','gwp'],left_on =
['Field_name', 'Field location (Country)','gwp'], how = 'left',indicator = True)
if upstream_midstream[upstream_midstream['_merge']!='both'].shape[0]!=0:
print('not fully merged. please check')
print(upstream_midstream[upstream_midstream['_merge']!='both'])
else:
upstream_midstream.drop(columns = '_merge',inplace = True)
def run_opem(upstream_midstream, gwp):
upstream_midstream_for_opem = upstream_midstream[upstream_midstream['gwp']==gwp]
upstream_midstream_for_opem['OPEM_field_name']=upstream_midstream_for_opem['Field_name']+';'+upstream_midstream_for_opem['original_file']
print('Getting data for OPEM Product Slates...')
upstream_midstream_for_opem['volume_flow_bbl']=''
upstream_midstream_for_opem['Product Slate (bbl product per day)'] = ''
upstream_midstream_for_opem['energy_flow_MJ']=''
upstream_midstream_for_opem['mass_flow_kg']=''
upstream_midstream_for_opem['Liquefied Petroleum Gases (LPG)_bbl']= upstream_midstream_for_opem['Liquified Petroleum Gas (LPG).1']/(2.04*0.159*270)
upstream_midstream_for_opem['Petrochemical Feedstocks_bbl']=upstream_midstream_for_opem['Petrochemical Feedstocks.1']/(1.264*0.159*270)
upstream_midstream_for_opem['Asphalt_bbl']= 0
upstream_midstream_for_opem['emission(kgCO2eq/bbl)']='kgCO2eq/bbl'
upstream_midstream_for_opem['Per energy content of refinery product (gCO2eq/MJ)'] = 'Emission by Product (gCO2eq/MJ)'
upstream_midstream_for_opem['Allocation to gasoline (g CO2eq/MJ)']='Process Emission (gCO2eq/MJ)'
opem_product_slate = upstream_midstream_for_opem[['OPEM_field_name',
'Product Slate (bbl product per day)',
'volume_flow_bbl','throughput','Gasoline',
'Jet Fuel',
'Diesel',
'Fuel Oil',
'Petroleum Coke',
'Residual Fuels',
'Refinery Fuel Gas (RFG)',
'Liquefied Petroleum Gases (LPG)_bbl',
'Petrochemical Feedstocks_bbl',
'Asphalt_bbl',
'energy_flow_MJ',
'Blended Gasoline',
'Jet-A/AVTUR',
'ULSD',
'Fuel Oil.1',
'Coke',
'Liquid Heavy Ends',
'RFG',
'Surplus NCR H2',
'Liquified Petroleum Gas (LPG)',
'Petrochemical Feedstocks',
'Asphalt',
'Gasoline S wt%',
'Gasoline H2 wt%','mass_flow_kg',
'Blended Gasoline.1',
'Jet-A/AVTUR.1',
'ULSD.1',
'Fuel Oil.2',
'Coke.1',
'Liquid Heavy Ends.1',
'Sulphur',
'RFG.1',
'Surplus NCR H2.1',
'Liquified Petroleum Gas (LPG).1',
'Petrochemical Feedstocks.1',
'Asphalt.1']]
opem_product_slate = opem_product_slate.T
opem_product_slate.columns = opem_product_slate.iloc[0]
opem_product_slate = opem_product_slate.iloc[1:,:]
slate_index = pd.read_csv(opem_dir + '/src/opem/products/product_slates/all_product_slates.csv')
opem_product_slate.index = slate_index.iloc[:,0]
#opem_product_slate.to_excel('../Downstream/Analytics/all_product_slates.xlsx')
opem_product_slate.to_csv(opem_dir+'/src/opem/products/product_slates/all_product_slates.csv')
print('Preparing data for opem_input...')
opem_input = pd.DataFrame()
opem_input['OPEM_field_name']=upstream_midstream_for_opem['OPEM_field_name']
opem_input['Upstream Field Selection']=''
opem_input['Gas Production Volume (MCFD)']=upstream_midstream_for_opem['ES_Gas_output(mmbtu/d)']*1000/983
opem_input['Oil Production Volume (BOED)']=upstream_midstream_for_opem['crude_to_refinery(bbl/d)']
opem_input['NGL Volume source']=2
opem_input['NGL C2 Volume (BOED)']=upstream_midstream_for_opem['NGL_C2(boed)']
opem_input['NGL C3 Volume (BOED)']=upstream_midstream_for_opem['NGL_C3(boed)']
opem_input['NGL C4 Volume (BOED)']=upstream_midstream_for_opem['NGL_C4(boed)']
opem_input['NGL C5+ Volume (BOED)']=upstream_midstream_for_opem['NGL_C5(boed)']
opem_input['Total field NGL volume (BOED)']=''
opem_input['OPGEE Coke mass (kg/d)']=upstream_midstream_for_opem['petcoke(kg/d)']
opem_input['% Field NGL C2 Volume allocated to Ethylene converstion']=1
opem_input['GWP selection (yr period, 100 or 20)']=gwp
opem_input_T = opem_input.set_index('OPEM_field_name').T
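# Transpose so that each field becomes a column, matching the layout of
# opem_input.csv, whose first five index columns are prepended below.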
#opem_input_T.to_excel('../Downstream/Analytics/opem_input.xlsx')
opem_input_index = pd.read_csv(opem_dir + '/opem_input.csv',header=0)
opem_input_T.reset_index(inplace = True)
# Get the index from opem_input.csv and update it with opem input values
df = pd.concat([opem_input_index.iloc[:,:5],opem_input_T.iloc[:,1:]],axis = 1)
df.to_csv('./opem_input.csv',index=False)
print('Running opem...')
os.system('opem')
opem_output = | pd.read_csv('./opem_output.csv',header=1) | pandas.read_csv |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
import json
import pandas as pd
from typing import Dict, Optional, List, Tuple
from pydash import get
from gs_quant.api.gs.assets import Currency, GsAsset, GsAssetApi
from gs_quant.api.gs.data import GsDataApi
from gs_quant.common import DateLimit
from gs_quant.data.fields import DataMeasure
from gs_quant.entities.entity import EntityType, PositionedEntity
from gs_quant.errors import MqValueError
from gs_quant.instrument import Instrument
from gs_quant.json_encoder import JSONEncoder
from gs_quant.markets.securities import Asset, AssetType
from gs_quant.markets.indices_utils import *
from gs_quant.target.data import DataQuery
from gs_quant.entities.tree_entity import AssetTreeNode, TreeHelper
class Index(Asset, PositionedEntity):
"""
Index which tracks an evolving portfolio of securities, and can be traded through cash or derivatives markets.
Includes support for STS indices.
"""
def __init__(self,
id_: str,
asset_class: AssetClass,
name: str,
exchange: Optional[str] = None,
currency: Optional[Currency] = None,
entity: Optional[Dict] = None):
Asset.__init__(self, id_, asset_class, name, exchange, currency, entity=entity)
PositionedEntity.__init__(self, id_, EntityType.ASSET)
if entity:
self.asset_type = AssetType(entity['type'])
else:
self.asset_type = AssetType.INDEX
if self.__is_sts_index():
self.tree_helper = TreeHelper(id_, tree_underlier_dataset='STS_UNDERLIER_WEIGHTS')
self.tree_df = pd.DataFrame()
def __str__(self):
return self.name
def get_type(self) -> AssetType:
return self.asset_type
def get_currency(self) -> Optional[Currency]:
return self.currency
def get_return_type(self) -> ReturnType:
if self.parameters is None or self.parameters.index_return_type is None:
return ReturnType.TOTAL_RETURN
return ReturnType(self.parameters.index_return_type)
@classmethod
def get(cls, identifier: str) -> Optional['Index']:
"""
Fetch an existing index
:param identifier: Any common identifier for an index(ric, ticker, etc.)
:return: Index object
**Usage**
Get existing Index instance
**Examples**
Get index details:
>>> from gs_quant.markets.index import Index
>>>
>>> index = Index.get("GSMBXXXX")
"""
gs_asset = cls.__get_gs_asset(identifier)
asset_entity: Dict = json.loads(json.dumps(gs_asset.as_dict(), cls=JSONEncoder))
if gs_asset.type.value in STSIndexType.to_list() or gs_asset.type.value == 'Index':
return cls(gs_asset.id, gs_asset.asset_class, gs_asset.name, exchange=gs_asset.exchange,
currency=gs_asset.currency, entity=asset_entity)
else:
raise MqValueError(f'{identifier} is not an Index identifier')
def get_fundamentals(self,
start: dt.date = DateLimit.LOW_LIMIT.value,
end: dt.date = dt.date.today(),
period: DataMeasure = DataMeasure.ONE_YEAR.value,
direction: DataMeasure = DataMeasure.FORWARD.value,
metrics: List[DataMeasure] = DataMeasure.list_fundamentals()) -> pd.DataFrame:
"""
Retrieve fundamentals data for an index across a date range. Currently supports STS indices only
:param start: start date (default is 1 January, 1970)
:param end: end date (default is today)
:param period: period for the relevant metric. Can be one of ONE_YEAR('1y'), TWO_YEARS('2y'), \
THREE_YEARS('3y') (default is 1y)
:param direction: direction of the outlook period. Can be one of 'forward' or 'trailing' (default is forward)
:param metrics: list of fundamentals metrics. (default is all)
:return: dataframe with fundamentals information
**Usage**
Retrieve fundamentals data for an index across a date range
**Examples**
Retrieve historical dividend yield data for an index
>>> from gs_quant.markets.index import Index
>>> from gs_quant.data.fields import DataMeasure
>>>
>>> index = Index.get("GSMBXXXX")
>>> index.get_fundamentals(metrics=[DataMeasure.DIVIDEND_YIELD])
"""
if self.__is_sts_index():
where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)
query = DataQuery(where=where, start_date=start, end_date=end)
response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.STS_FUNDAMENTALS.value)
return pd.DataFrame(response)
else:
raise MqValueError('This method currently supports STS indices only')
def get_latest_close_price(self, price_type: List[PriceType] = None) -> pd.DataFrame:
"""
Retrieve latest close prices for an index. Only STS indices support indicative prices.
:param price_type: Type of prices to return. Default returns official close price
:return: dataframe with latest close price
**Usage**
Retrieve latest close prices for an index
**Examples**
>>> from gs_quant.markets.index import Index
>>>
>>> index = Index.get("GSMBXXXX")
>>> index.get_latest_close_price([PriceType.OFFICIAL_CLOSE_PRICE, PriceType.INDICATIVE_CLOSE_PRICE])
"""
if (not price_type) or (price_type == [PriceType.OFFICIAL_CLOSE_PRICE]):
return super().get_latest_close_price()
prices = pd.DataFrame()
if PriceType.OFFICIAL_CLOSE_PRICE in price_type:
official_level = super().get_latest_close_price()
prices['date'] = official_level.index
prices['closePrice'] = official_level[0]
if PriceType.INDICATIVE_CLOSE_PRICE in price_type:
if self.__is_sts_index():
where = dict(assetId=self.id)
query = DataQuery(where=where)
response = GsDataApi.last_data(query=query, dataset_id=IndicesDatasets.STS_INDICATIVE_LEVELS.value)
indicative_level = pd.DataFrame(response).iloc[-1:][['date', 'indicativeClosePrice']]
prices['date'] = indicative_level['date'].iat[0]
prices['indicativeClosePrice'] = indicative_level['indicativeClosePrice'].iat[0]
else:
raise MqValueError('PriceType.INDICATIVE_CLOSE_PRICE currently supports STS indices only')
return prices
def get_close_price_for_date(self,
date: dt.date = dt.date.today(),
price_type: List[PriceType] = None) -> pd.DataFrame:
"""
Retrieve close prices for an index. Only STS indices support indicative prices.
:param date: date of the required prices (default is today)
:param price_type: Type of prices to return. Default returns official close price
:return: dataframe with date's close prices
**Usage**
Retrieve the close prices for an index for a given date
**Examples**
>>> from gs_quant.markets.index import Index
>>>
>>> index = Index.get("GSMBXXXX")
>>> index.get_close_price_for_date(dt.date(2021, 1, 7), \
[PriceType.OFFICIAL_CLOSE_PRICE, PriceType.INDICATIVE_CLOSE_PRICE])
"""
if (not price_type) or (price_type == [PriceType.OFFICIAL_CLOSE_PRICE]):
return super().get_close_price_for_date(date)
prices = pd.DataFrame()
if PriceType.OFFICIAL_CLOSE_PRICE in price_type:
official_level = super().get_close_price_for_date(date)
prices['date'] = official_level.index
prices['closePrice'] = official_level[0]
if PriceType.INDICATIVE_CLOSE_PRICE in price_type:
if self.__is_sts_index():
response = self.__query_indicative_levels_dataset(start=date, end=date)
indicative_level = pd.DataFrame(response)
import numpy as np
import pandas as pd
from PyQuantum.Common.Matrix import *
from PyQuantum.TC.FullBase import *
class HamiltonianL:
def set_base(self, base):
self.base = base
def __init__(self, capacity, cavity, RWA=True, reduced=True):
self.capacity = capacity
self.cavity = cavity
self.size = 0
HC = {}
size_start = 0
self.states = {}
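# Build one sub-Hamiltonian per excitation capacity c (capacity, ..., 0),
# re-index its basis states with a running offset, and later place the blocks
# on the diagonal of a single (self.size x self.size) matrix.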
for c in range(capacity, -1, -1):
Hc = Hamiltonian(capacity=c, cavity=cavity,
RWA=RWA, reduced=reduced)
HC[c] = Hc
for i in Hc.base.base:
# for i in Hc.states.values():
print(i)
print()
for k, v in Hc.states.items():
self.states[size_start + k] = v
size_start += HC[c].size
# self.states += Hc.states
# print(Hc.states)
self.size += Hc.size
# for i in self.states.values():
# print(i)
I = np.zeros([self.size, self.size], dtype=np.complex128)
# print(self.states)
size_start = 0
for c in range(capacity, -1, -1):
# print("c=", c)
# print(HC[c])
# print(HC[c].size)
# print(HC[c].states)
# print(HC[c].matrix.data)
I[size_start:size_start+HC[c].size,
size_start:size_start+HC[c].size] = HC[c].matrix.data
size_start += HC[c].size
self.matrix = Matrix(self.size, self.size, dtype=np.complex128)
self.matrix.data = I
# exit(0)
def iprint(self):
df = pd.DataFrame()
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## jupyter console
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
## merge
data1 = pd.DataFrame({'key':['b','b','a','c','a','a','b'], 'data1':range(7)})
data1
data2 = pd.DataFrame({'key':['a','b','d'], 'data2':range(3)})
data2
pd.merge(data1, data2)
pd.merge(data1, data2, on='key')
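## A small extension of the same session (data1/data2 as defined above):
## an outer join keeps keys present in only one frame ('c' and 'd' here) and
## fills the missing side with NaN, which makes dropped rows easy to spot.
pd.merge(data1, data2, on='key', how='outer')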
import re
import os
import subprocess
from functools import lru_cache
from typing import Dict, List
import pandas as pd
from helpers import (
NOW,
RemoteCommand,
Settings,
create_settings,
nix_build,
spawn,
flamegraph_env,
read_stats,
write_stats,
scone_env
)
from network import Network, NetworkKind, setup_remote_network
from storage import Storage, StorageKind
@lru_cache(maxsize=1)
def sysbench_command(settings: Settings) -> RemoteCommand:
path = nix_build("sysbench")
return settings.remote_command(path)
@lru_cache(maxsize=1)
def nc_command(settings: Settings) -> RemoteCommand:
path = nix_build("netcat-native")
return settings.remote_command(path)
def parse_sysbench(output: str) -> Dict[str, str]:
stats_found = False
section = ""
data = {}
for line in output.split("\n"):
print(line)
if line.startswith("SQL statistics"):
stats_found = True
if stats_found:
col = line.split(":")
if len(col) != 2:
continue
name = col[0].strip()
# remove trailing statistics, e.g.:
# transform
# transactions: 3228 (322.42 per sec.)
# to
# transactions: 3228
value = re.sub(r"\([^)]+\)$", "", col[1]).strip()
if value == "" and name != "queries performed":
section = name
continue
data[f"{section} {name}"] = value
return data
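# A made-up sanity check for parse_sysbench (the sample below only mimics the
# "name: value (rate)" layout of sysbench output, it is not captured output):
#   sample = "SQL statistics:\n    transactions:  3228   (322.42 per sec.)"
#   parse_sysbench(sample)  # -> {'SQL statistics transactions': '3228'}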
def process_sysbench(output: str, system: str, stats: Dict[str, List]) -> None:
data = parse_sysbench(output)
for k, v in data.items():
stats[k].append(v)
stats["system"].append(system)
class Benchmark:
def __init__(self, settings: Settings) -> None:
self.settings = settings
self.network = Network(settings)
self.storage = Storage(settings)
def run(
self,
attr: str,
system: str,
mnt: str,
stats: Dict[str, List],
extra_env: Dict[str, str] = {},
) -> None:
env = dict(SGXLKL_CWD=mnt)
env.update(flamegraph_env(f"{os.getcwd()}/mysql-{system}"))
env.update(extra_env)
mysql = nix_build(attr)
sysbench = sysbench_command(self.storage.settings)
with spawn(
mysql,
"bin/mysqld",
f"--datadir={mnt}/var/lib/mysql",
"--socket=/tmp/mysql.sock",
extra_env=env,
):
common_flags = [
f"--mysql-host={self.settings.local_dpdk_ip}",
"--mysql-db=root",
"--mysql-user=root",
"--mysql-password=<PASSWORD>",
"--mysql-ssl=on",
"--table-size=500000",
f"{sysbench.nix_path}/share/sysbench/oltp_read_write.lua",
]
while True:
try:
proc = nc_command(self.settings).run(
"bin/nc", ["-z", "-v", self.settings.local_dpdk_ip, "3306"]
)
break
except subprocess.CalledProcessError:
print(".")
pass
sysbench.run("bin/sysbench", common_flags + ["prepare"])
proc = sysbench.run("bin/sysbench", common_flags + ["run"])
process_sysbench(proc.stdout, system, stats)
sysbench.run("bin/sysbench", common_flags + ["cleanup"])
def benchmark_native(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.NATIVE)
mount = benchmark.storage.setup(StorageKind.NATIVE)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-native", "native", mnt, stats, extra_env=extra_env)
def benchmark_sgx_lkl(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.TAP)
mount = benchmark.storage.setup(StorageKind.LKL)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-sgx-lkl", "sgx-lkl", mnt, stats, extra_env=extra_env)
def benchmark_sgx_io(benchmark: Benchmark, stats: Dict[str, List]) -> None:
extra_env = benchmark.network.setup(NetworkKind.DPDK)
mount = benchmark.storage.setup(StorageKind.SPDK)
extra_env.update(mount.extra_env())
with mount as mnt:
benchmark.run("mysql-sgx-io", "sgx-io", mnt, stats, extra_env=extra_env)
def benchmark_scone(benchmark: Benchmark, stats: Dict[str, List]) -> None:
mount = benchmark.storage.setup(StorageKind.SCONE)
with mount as mnt:
extra_env = scone_env(mnt)
extra_env.update(benchmark.network.setup(NetworkKind.NATIVE))
extra_env.update(mount.extra_env())
benchmark.run("mysql-scone", "scone", mnt, stats, extra_env=extra_env)
def main() -> None:
stats = read_stats("mysql.json")
settings = create_settings()
benchmark = Benchmark(settings)
benchmarks = {
"native": benchmark_native,
"sgx-lkl": benchmark_sgx_lkl,
"sgx-io": benchmark_sgx_io,
"scone": benchmark_scone,
}
setup_remote_network(settings)
system = set(stats["system"])
for name, benchmark_func in benchmarks.items():
if name in system:
print(f"skip {name} benchmark")
continue
benchmark_func(benchmark, stats)
write_stats("mysql.json", stats)
csv = f"mysql-{NOW}.tsv"
print(csv)
throughput_df = pd.DataFrame(stats)
import pandas as pd
import os
from elab_queries import import_elab_pull
# sort columns in elab queries for PPBC blocks/frozens or DLP (edit line 9 & 11)
# edit sorted_filename - eLab_<storage/PPBC>_samples.csv
def sort_columns_PPBC(pull_filename, sorted_filename):
elab_pull = import_elab_pull(pull_filename)
#DLP elab_pull = elab_pull.loc[elab_pull['Excluded'] == "No"]
elab_pull = elab_pull.loc[elab_pull['Tissue Type'] == "FFPE Block"]
#DLP elab_pull = elab_pull.loc[elab_pull['Downstream Submission'] == "Storage Only"]
elab_pull = elab_pull[["Project", "Patient ID", "MRN", "Surgery #", "Surgery Type", "Surgery Date", "Surgeon", "Surgery Location", "Room", "Study Protocol", "Excluded", "Final Pathology", "Processing Date", "Specimen Site", "Primary Site", "Tissue Type", "PPBC Bank #", "PPBC Aliquot #", "PPBC Accession #"]]
#DLP elab_pull = elab_pull[
# ["Project", "Patient ID", "MRN", "Surgery Type", "Surgery Date", "Surgeon", "Surgery Location", "Room",
# "Study Protocol", "Excluded", "Final Pathology", "Processing Date", "Specimen Site", "Site Details",
# "Primary Site", "Tissue Type", "Preservation Method", "Total Cell Count (cells/ml)", "Live Cell Count (cells/ml)",
# "Total Volume (ml)", "Viability (%)", "Downstream Submission", "Storage Populations", "Storage Facility",
# "IGO Storage ID", "IGO Sample ID"]]
#print(elab_pull)
elab_pull.to_csv(sorted_filename, index=False)
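# Hypothetical usage (the file names below are placeholders, not real exports):
#   sort_columns_PPBC("elab_pull_2021-01.csv", "eLab_PPBC_samples.csv")
# pull_filename is whatever import_elab_pull expects; sorted_filename follows
# the eLab_<storage/PPBC>_samples.csv naming noted above.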
# compare sorted columns sheet to submitted FFPE samples for mpIF/IF and WGS-T (PPBC) and DLP
# submitted spreadsheet format (Patient ID, Specimen Site, Nomenclature)
def compare_sorted_to_submitted_PPBC(sorted_filename, submitted_filename, saved_filename):
fileDir = os.path.dirname(os.path.realpath('__file__'))
submitted_doc = os.path.join(fileDir, "/Volumes/GynLab/Weigelt Lab/Jamie/wet_lab/SPECTRUM/" + submitted_filename)
sorted_file = pd.read_csv(sorted_filename)
submitted_file = pd.read_excel(submitted_doc)
merged_files = pd.merge(submitted_file, sorted_file,
on=['Patient ID', 'Specimen Site'], how='left')
merged_files = merged_files[["Project", "Patient ID", "MRN", "Surgery Type", "Surgery Date", "Surgeon", "Surgery Location", "Room", "Study Protocol", "Excluded", "Final Pathology", "Processing Date", "Specimen Site", "Site Details", "Primary Site", "Tissue Type", "PPBC Bank #", "PPBC Aliquot #", "PPBC Accession #", "Nomenclature"]]
#DLP merged_files = merged_files[
# ["Patient ID", "MRN", "Surgery Type", "Surgery Date", "Surgeon", "Surgery Location", "Room",
# "Study Protocol", "Excluded", "Final Pathology", "Processing Date", "Specimen Site", "Site Details",
# "Primary Site", "Tissue Type", "Preservation Method", "Total Cell Count (cells/ml)",
# "Live Cell Count (cells/ml)", "Total Volume (ml)", "Viability (%)", "Downstream Submission",
# "Storage Facility", "IGO Storage ID", "IGO Sample ID"]]
merged_files = merged_files.drop_duplicates()
#print(merged_files)
saved_file = os.path.join(fileDir, "/Volumes/GynLab/Weigelt Lab/Jamie/wet_lab/SPECTRUM/Sample Submissions and Data/mpIF-IF/" + saved_filename)
merged_files.to_csv(saved_file, index=False)
# compare sorted columns sheet to submitted FFPE samples for mpIF/IF (Diagnostic)
# 1. pull reference_file query from elab (Excluded=No)
# 2. remove duplicates from reference_file (Col U, AD), save as xlsx
def compare_sorted_to_submitted_diag(reference_filename, submitted_filename, saved_filename):
fileDir = os.path.dirname(os.path.realpath('__file__'))
submitted_doc = os.path.join(fileDir, "/Volumes/GynLab/Weigelt Lab/Jamie/wet_lab/SPECTRUM/" + submitted_filename)
reference_file = pd.read_excel(reference_filename)
submitted_file = pd.read_excel(submitted_doc)
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/brA1_axon-dendrite.csv', header = 0, index_col = 0)
import os
import pandas as pd
import config as cfg
from src.utils.data_processing import medea_path
directory = medea_path('data', 'raw', 'AggregatedGenerationPerType')
df_ror = pd.DataFrame()
for file in os.listdir(directory):
filename = os.fsdecode(file)
print(filename)
if filename.endswith('.csv'):
df_tmpfile = pd.DataFrame()
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
import tweepy
from pandas import DataFrame
import time
# Twitter Credentials
consumer_key = 'riiUgzG0nHSkUGt5c521LgcnD'
consumer_key_secret = '<KEY>'
access_token = '<KEY>'
access_token_secret = '<KEY>'
bearer_token = '<KEY>'
# For Request
tweetsPerQuery = 100
maxTweets = 1000
users = ['realDonaldTrump', 'shanedawson', 'Zendaya', 'justinbieber',
'KylieJenner', 'ArianaGrande', 'KimKardashian']
# Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
for userId in users:
# Crawling Data
print("working on", userId)
all_tweets = []
tweets = api.user_timeline(screen_name=userId, count=200,
include_rts=False, tweet_mode="extended")
all_tweets.extend(tweets)
old_id = tweets[-1].id
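# Page backwards through the timeline: each request asks only for tweets older
# than the oldest one collected so far (max_id=old_id-1), until an empty page
# comes back or roughly 3000 tweets have been gathered.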
while True:
try:
tweets = api.user_timeline(screen_name=userId,
count=200, include_rts=False, max_id=old_id-1, tweet_mode="extended")
if len(tweets) == 0:
break
old_id = tweets[-1].id
all_tweets.extend(tweets)
if len(all_tweets) >= 3000:
break
print("N of tweets download till now {}".format(len(all_tweets)))
except tweepy.TweepError:
print("TweepError wait...")
time.sleep(120)
# Write to csv file
outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode("utf-8").decode("utf-8")] for idx, tweet in enumerate(all_tweets)]
df = DataFrame(outtweets, columns=["id", "created_at", "text"])
# -*- coding: utf-8 -*-
"""
Created on Thu May 17 10:37:45 2018
@author: l_cry
"""
import pandas as pd
'''
pfo_path: file path of the portfolio file;
code: stock code,
name: stock name,
num: number of shares
ref_path: file path of the reference index; must contain the date (date) and that day's index close price (close)
-------------
return:
pfo: df
ref: df
'''
def pre_solving(pfo_path, ref_path):
ref = pd.read_excel(ref_path)
pfo = pd.read_excel(pfo_path)
return pfo, ref
def track_portfolio(pfo,ref,start_date,end_date=None):
'''
Compute the daily value of the portfolio.
Parameters:
---------------
pfo: portfolio df;
code: stock code
name: stock name
num: number of shares
ref: reference table df, used to compute the portfolio's day-by-day value
index
date: date
columns
code: stock code
contents
daily close price of each code
Returns:
---------------
DataFrame:
index:
date: date
columns:
code: stock code
content:
market value of each code on each date
'''
df=pd.DataFrame(index=ref.index[ref.index>start_date])
codes=ref.columns.values
for code,num in zip(pfo.code,pfo.num):
if code in codes:
df[code]=num*ref[code][ref.index>start_date]
else:
raise Exception('some codes are not included')
return df
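# Illustrative call with made-up frames (not project data): pfo needs 'code' and
# 'num' columns, ref needs a date index and one close-price column per code.
#   pfo = pd.DataFrame({'code': ['600000', '000001'], 'num': [100, 200]})
#   ref = pd.DataFrame({'600000': [10.0, 10.5], '000001': [5.0, 4.8]},
#                      index=pd.to_datetime(['2018-05-02', '2018-05-03']))
#   track_portfolio(pfo, ref, start_date=pd.Timestamp('2018-05-01'))
# The result holds num * close per code for every date after start_date.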
def portfolio_profit(pfo,ref,index,start_date,end_date=None):
'''
Compute the portfolio's value and compare its performance with the index.
Parameters:
---------------
pfo: portfolio df;
code: stock code
name: stock name
num: number of shares
ref: reference table df, used to compute the portfolio's day-by-day value
columns
date: date
code: stock code
contents
daily close price of each code
index: index df, used to benchmark the portfolio's performance
date: date
close: index close price
Returns:
---------------
DataFrame:
date: date,
mv: market value,
ivg: index return,
pvg: portfolio return,
alp: excess return
'''
df=track_portfolio(pfo,ref,start_date,end_date=None)
return df.apply(sum,axis=1).to_frame(name='pv').merge(index,left_index=True,right_index=True,how='left')
import os
path='C:/Users/l_cry/Nextcloud/HW/gp/'
path="C:/Users/l_cry/Documents/Tencent Files/1204027935/FileRecv/投组20180521/"
files=os.listdir(path)
file_map={}
dfhs300 = pd.read_excel('C:/Users/l_cry/Documents/Tencent Files/1204027935/FileRecv/gp/hs300gp20170101.xlsx',index_col=0)
index = pd.read_excel('C:/Users/l_cry/Desktop/指数行情序列hs300.xls', sheet_name='Sheet2', index_col=0)
"""
The `star' class is the core of PBjam and refers to a single target that is to
be peakbagged. Each `star' instance is assigned an ID and physical input
parameters, as well as a time series or power spectrum.
The different steps in the peakbagging process are then passed the `star'
instance, updating it with the results of each step. The outputs of each step
are stored in a dedicated directory created with the star ID.
The `session' class wraps one or more star class instances and peakbags them all
sequentially. The recommended use of PBjam is the use the `session' class, and
only use the `star' class for more granular control of the peakbagging process.
"""
import os, warnings, re, time
from .asy_peakbag import asymptotic_fit
from .priors import kde
from .peakbag import peakbag
from .jar import get_priorpath, to_log10, references, isvalid
from .plotting import plotting
import pandas as pd
import numpy as np
from astroquery.mast import ObservationsClass as AsqMastObsCl
from astroquery.mast import Catalogs
from astroquery.simbad import Simbad
import astropy.units as units
class star(plotting):
""" Class for each star to be peakbagged
Additional attributes are added for each step of the peakbagging process
Note spectrum is flattened (background divided out.)
Examples
--------
Peakbag using the star class. Note that the star class only takes Lightkurve
periodograms, pg, as spectrum input.
>>> st = pbjam.star(ID='KIC4448777', pg=pg, numax=[220.0, 3.0],
dnu=[16.97, 0.01], teff=[4750, 100],
bp_rp = [1.34, 0.01])
>>> st(make_plots=True)
Parameters
----------
ID : string, int
Target identifier. If custom timeseries/periodogram is provided, it
must be resolvable by LightKurve (KIC, TIC, EPIC, HD, etc.).
pg : lightkurve.periodogram.Periodogram object
A lightkurve periodogram object containing frequencies in units of
microhertz and power (in arbitrary units).
numax : list
List of the form [numax, numax_error].
dnu : list
List of the form [dnu, dnu_error].
teff : list
List of the form [teff, teff_error].
bp_rp : list
List of the form [bp_rp, bp_rp_error].
path : str, optional
The path at which to store output. If no path is set but make_plots is
True, output will be saved in the current working directory. Default is
the current working directory.
prior_file : str, optional
Path to the csv file containing the prior data. Default is
pbjam/data/prior_data.csv
Attributes
----------
f : array
Array of power spectrum frequencies
s : array
power spectrum
"""
def __init__(self, ID, pg, numax, dnu, teff=[None,None], bp_rp=[None,None],
path=None, prior_file=None):
self.ID = ID
if numax[0] < 25:
warnings.warn('The input numax is less than 25. The prior is not well defined here, so be careful with the result.')
self.numax = numax
self.dnu = dnu
self.references = references()
self.references._addRef(['numpy', 'python', 'lightkurve', 'astropy'])
teff, bp_rp = self._checkTeffBpRp(teff, bp_rp)
self.teff = teff
self.bp_rp = bp_rp
self.pg = pg.flatten() # in case the user supplies an unnormalized spectrum
self.f = self.pg.frequency.value
self.s = self.pg.power.value
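# Keep the observables in linear units (_obs) and, for dnu, numax and teff,
# as log10 values with propagated uncertainties via jar.to_log10 (_log_obs);
# bp_rp stays in linear units.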
self._obs = {'dnu': self.dnu, 'numax': self.numax, 'teff': self.teff,
'bp_rp': self.bp_rp}
self._log_obs = {x: to_log10(*self._obs[x]) for x in self._obs.keys() if x != 'bp_rp'}
self._set_outpath(path)
if prior_file is None:
self.prior_file = get_priorpath()
else:
self.prior_file = prior_file
def _checkTeffBpRp(self, teff, bp_rp):
""" Set the Teff and/or bp_rp values
Checks the input Teff and Gbp-Grp values to see if any are missing.
If Gbp-Grp is missing it will be looked up online either from the TIC or
the Gaia archive.
Teff and Gbp-Grp provide a lot of the same information, so only one of
them need to be provided to start with. If one is not provided, PBjam
will assume a wide prior on it.
Parameters
----------
teff : list
List of the form [teff, teff_error]. For multiple targets, use a list
of lists.
bp_rp : list
List of the form [bp_rp, bp_rp_error]. For multiple targets, use a list
of lists.
Returns
-------
teff : list
The checked teff value. List of the form [teff, teff_error].
bp_rp : list
The checked bp_rp value. List of the form [bp_rp, bp_rp_error].
"""
if isvalid(bp_rp[0]) is False:
try:
bp_rp = [get_bp_rp(self.ID), 0.1]
except:
bp_rp = [np.nan, np.nan]
if not isvalid(teff[0]) and not isvalid(bp_rp[0]):
raise ValueError('Must provide either teff or bp_rp arguments when initializing the star class.')
elif not isvalid(teff[0]):
teff = [4889, 1500] # these are rough estimates from the prior
elif not np.isfinite(bp_rp[0]):
bp_rp = [1.2927, 0.5] # these are rough estimates from the prior
self.references._addRef(['Evans2018'])
return teff, bp_rp
def _get_outpath(self, fname):
""" Get basepath or make full file path name.
Convenience function for either setting the base path name for the star,
or if given fname as input, will append this to the basepath name to
create a full path to the file in question.
Parameters
----------
fname : str, optional
If not None, will append this to the pathname of the star. Use this
to store files such as plots or tables.
Returns
-------
path : str
If fname is None, path is the path name of the star. Otherwise it is
the full file path name for the file in question.
"""
if fname is None:
return self.path
elif isinstance(fname, str):
path = os.path.join(*[self.path, fname])
else:
raise ValueError(f'Unrecognized input {fname}.')
if not os.path.isdir(self.path):
raise IOError(f'You are trying to access {self.path} which is a directory that does not exist.')
else:
return path
def _set_outpath(self, path):
""" Sets the path attribute for star
If path is a string it is assumed to be a path name, if not the
current working directory will be used.
Attempts to create an output directory for all the results that PBjam
produces. A directory is created when a star class instance is
initialized, so a session might create multiple directories.
Parameters
----------
path : str
Directory to place the star subdirectory.
"""
if isinstance(path, str):
# If path is str, presume user wants to put stuff somewhere specific.
self.path = os.path.join(*[path, f'{self.ID}'])
else:
# Otherwise just create a subdir in cwd.
self.path = os.path.join(*[os.getcwd(), f'{self.ID}'])
# Check if self.path exists, if not try to create it
if not os.path.isdir(self.path):
try:
os.makedirs(self.path)
except Exception as ex:
message = "Could not create directory for Star {0} because an exception of type {1} occurred. Arguments:\n{2!r}".format(self.ID, type(ex).__name__, ex.args)
print(message)
def run_kde(self, bw_fac=1.0, make_plots=False, store_chains=False):
""" Run all steps involving KDE.
Starts by creating a KDE based on the prior data sample. Then samples
this KDE for initial starting positions for asy_peakbag.
Parameters
----------
bw_fac : float
Scaling factor for the KDE bandwidth. By default the bandwidth is
automatically set, but may be scaled to adjust for sparsity of the
prior sample.
make_plots : bool, optional
Whether or not to produce plots of the results. Default is False.
store_chains : bool, optional
Whether or not to store posterior samples on disk. Default is False.
"""
print('Starting KDE estimation')
# Init
kde(self)
# Call
self.kde(dnu=self.dnu, numax=self.numax, teff=self.teff,
bp_rp=self.bp_rp, bw_fac=bw_fac)
# Store
if make_plots:
self.kde.plot_corner(path=self.path, ID=self.ID,
savefig=make_plots)
self.kde.plot_spectrum(pg=self.pg, path=self.path, ID=self.ID,
savefig=make_plots)
self.kde.plot_echelle(path=self.path, ID=self.ID,
savefig=make_plots)
self.references._addRef('matplotlib')
if store_chains:
kde_samps = pd.DataFrame(self.kde.samples, columns=self.kde.par_names)
kde_samps.to_csv(self._get_outpath(f'kde_chains_{self.ID}.csv'), index=False)
def run_asy_peakbag(self, norders, make_plots=False,
store_chains=False, method='emcee',
developer_mode=False):
""" Run all steps involving asy_peakbag.
Performs a fit of the asymptotic relation to the spectrum (l=2,0 only),
using initial guesses and prior for the fit parameters from KDE.
Parameters
----------
norders : int
Number of orders to include in the fits.
make_plots : bool, optional
Whether or not to produce plots of the results. Default is False.
store_chains : bool, optional
Whether or not to store posterior samples on disk. Default is False.
method : string
Method to be used for sampling the posterior. Options are 'emcee' or
'cpnest'. Default is 'emcee', which calls emcee; the alternative,
'cpnest', runs nested sampling with CPnest.
developer_mode : bool
Run asy_peakbag in developer mode. Currently just retains the input
value of dnu and numax as priors, for the purposes of expanding
the prior sample. Important: This is not good practice for getting
science results!
"""
print('Starting asymptotic peakbagging')
# Init
asymptotic_fit(self, norders=norders)
# Call
self.asy_fit(method, developer_mode)
self.references._addRef(method)
# Store
self.asy_fit.summary.to_csv(self._get_outpath(f'asymptotic_fit_summary_{self.ID}.csv'),
index=True, index_label='name')
self.asy_fit.modeID.to_csv(self._get_outpath(f'asymptotic_fit_modeID_{self.ID}.csv'),
index=False)
if make_plots:
self.asy_fit.plot_spectrum(path=self.path, ID=self.ID,
savefig=make_plots)
self.asy_fit.plot_corner(path=self.path, ID=self.ID,
savefig=make_plots)
self.asy_fit.plot_echelle(path=self.path, ID=self.ID,
savefig=make_plots)
self.references._addRef('matplotlib')
if store_chains:
asy_samps = pd.DataFrame(self.asy_fit.samples, columns=self.asy_fit.par_names)
#!/usr/bin/env python
# coding: utf-8
import html
import os
import re
import pandas as pd
import requests
target_url = {"jp": "http://scp-jp.wikidot.com/foundation-tales-jp",
"en": "http://scp-jp.wikidot.com/foundation-tales",
"ru": "http://scp-jp.wikidot.com/foundation-tales-ru",
"cn": "http://scp-jp.wikidot.com/foundation-tales-cn",
"fr": "http://scp-jp.wikidot.com/foundation-tales-fr",
"pl": 'http://scp-jp.wikidot.com/foundation-tales-pl',
"es": 'http://scp-jp.wikidot.com/foundation-tales-es',
"de": 'http://scp-jp.wikidot.com/foundation-tales-de',
"th": 'http://scp-jp.wikidot.com/foundation-tales-th',
"it": 'http://scp-jp.wikidot.com/foundation-tales-it',
"ua": 'http://scp-jp.wikidot.com/foundation-tales-ua',
"pt": 'http://scp-jp.wikidot.com/foundation-tales-pt',
"ko": 'http://scp-jp.wikidot.com/foundation-tales-ko'
}
start_word = {"jp": '<p>アルファベット順著者</p>',
"en": '<p>アルファベット順著者</p>',
"ru": '<h1 id="toc0"><span>著作者順</span></h1>',
"cn": '<h1 id="toc0"><span>著作者順</span></h1>',
"fr": '<h1 id="toc1"><span>著作者順</span></h1>',
"pl": '<h1 id="toc0"><span>著作者順</span></h1>',
"es": '<h1 id="toc0"><span>著作者順</span></h1>',
"de": '<h1 id="toc0"><span>著作者順</span></h1>',
"th": '<h1 id="toc0"><span>著作者順</span></h1>',
"it": '<h1 id="toc0"><span>著作者順</span></h1>',
"ua": '<h1 id="toc0"><span>著作者順</span></h1>',
"pt": '<h1 id="toc0"><span>著作者順</span></h1>',
"ko": '<h1 id="toc0"><span>著作者順</span></h1>',
}
end_word = {"jp": '<td>その他<a name="misc"></a></td>',
"en": '<td>その他<a name="misc"></a></td>',
"ru": '</div>',
"cn": '</div>',
"fr": '</div>',
"pl": '</div>',
"es": '</div>',
"de": '</div>',
"th": '</div>',
"it": '</div>',
"ua": '</div>',
"pt": '</div>',
"ko": '</div>',
}
exclusion_list = ['#top',
'http://scp-jp.wikidot.com/forum/t-6047066/',
# '<td><a href="/scp',
'<td><a href="/1600con17"',
'<td><a href="/warucontest">',
'<td><a href="/author:',
'<td><a href="/venture-contest-2018',
'<td><a href="/lily-s-proposal">',
'<td><a href="/newface-contest-hub',
'<td><a href="/personnel-the-origin-hub',
]
def tale():
urls = []
titles = []
authors = []
brts = []
author = ""
title = ""
url = ""
masterpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for key in target_url.keys():
response = requests.get(target_url[key])
if response.status_code is not requests.codes.ok:
print(f"\t{key} request err : {response.status_code}")
continue
scp_lines = response.text.split("\n")
tales_start = scp_lines.index(start_word[key])
for line in scp_lines:
if re.match(r'<td>\s</td>', line):
scp_lines.remove(line)
elif re.match('<td>.<a name="."></a></td>', line):
scp_lines.remove(line)
elif '⇑' in line:
scp_lines.remove(line)
for line in scp_lines[tales_start:]:
line = html.unescape(line)
if end_word[key] in line:
break
# author start
elif '<th style="font-size:125%"' in line:
author = re.search('alt=".*?"', line)
if author is not None:
author = author.group()[5:-1]
if author is None:
author = re.search("<strong>.*?</strong>", line)
if author is not None:
author = author.group()[8:-9]
if author is None:
author = re.search("<em>.*?</em>", line)
if author is not None:
author = author.group()[4:-5]
if author is None:
author = "Unknown pattern of author"
elif '<p><strong>' in line:
author = line.replace("<p><strong>", "")
author = author.replace("</strong></p>", "")
elif '<p><span class="printuser">' in line:
author = line[line.find(
'return false;">') + len('return false;">'):]
author = author.replace("</a></span></p>", "")
elif '<p><span class="error-inline">' in line:
author = line[line.find('<p><span class="error-inline"><em>') +
len('<p><span class="error-inline"><em>'): -
len('</em> does not match any existing user name</span></p>')]
# author end
# url,title start
elif any([s for s in exclusion_list if s in line]):
pass
else:
if "<td><a href=" in line:
sp_line = re.split('[<>]', line)
url = sp_line[3].replace('"', "").replace("a href=", "")
title = sp_line[4]
elif '<td><a target="_blank" href="' in line:
sp_line = re.split('[<>]', line)
url = sp_line[3].replace('"', "").replace(
'a target=_blank href=http://scp-jp.wikidot.com', "")
title = sp_line[4]
elif '<li><a href="' in line:
sp_line = re.split('[<>]', line)
url = sp_line[3].replace('"', "").replace("a href=", "")
title = sp_line[4]
else:
continue
if 'http://scp-jp.wikidot.com/' in url:
url = url.replace("http://scp-jp.wikidot.com", '')
urls.append(url)
titles.append(title)
authors.append(author)
brts.append(key)
print(f"\tpage:{key}のデータ取得が完了しました。")
df = pd.DataFrame(columns=['url', 'title', 'author', 'branches'])
from __future__ import print_function, absolute_import
import sys, gzip, time, datetime, random, os, logging, gc,\
scipy, sklearn, sklearn.model_selection,\
sklearn.utils, sklearn.externals.joblib, inspect, bcolz, pickle
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
def debug(msg):
if not cfg['debug']: return
log.info(msg)
_message_timers = {}
def start(msg, id=None, force=False):
if not force and not cfg['debug']: return
if id is None:
s = inspect.stack()
if len(s) > 0 and len(s[1]) > 2: id = s[1][3]
else: id = 'global'
_message_timers[id] = time.time()
log.info(msg)
def stop(msg, id=None, force=False):
if not force and not cfg['debug']: return
if id is None:
s = inspect.stack()
if len(s) > 0 and len(s[1]) > 2: id = s[1][3]
else: id = 'global'
took = datetime.timedelta(seconds=time.time() - _message_timers[id]) \
if id in _message_timers else 'unknown'
msg += (', took: %s' % str(took))
log.info(msg)
if id in _message_timers: del _message_timers[id]
return msg
def reseed(clf):
s = cfg['sys_seed']
if clf is not None:
clf.random_state = s
random.seed(s)
np.random.seed(s)
try:
import torch
torch.manual_seed(s)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(s)
except ImportError:pass
return clf
def seed(seed):
cfg['sys_seed'] = seed
reseed(None)
def do_cv(clf, X, y, n_samples=None, n_iter=3, test_size=None, quiet=False,
scoring=None, stratified=False, n_jobs=-1, fit_params=None, prefix='CV'):
if not quiet: start('starting ' + prefix)
reseed(clf)
if n_samples is None: n_samples = len(y)
if X.shape[0] > len(y): X = X[:len(y)]
elif type(n_samples) is float: n_samples = int(n_samples)
if scoring is None: scoring = cfg['scoring']
if test_size is None: test_size = 1./n_iter
try:
if (n_samples > X.shape[0]): n_samples = X.shape[0]
except: pass
if cfg['custom_cv'] is not None:
cv = cfg['custom_cv']
elif stratified:
cv = sklearn.model_selection.StratifiedShuffleSplit(n_iter, test_size=test_size, random_state=cfg['sys_seed'])
else:
cv = sklearn.model_selection.ShuffleSplit(n_iter, test_size=test_size, random_state=cfg['sys_seed'])
if n_jobs == -1 and cfg['cv_n_jobs'] > 0: n_jobs = cfg['cv_n_jobs']
test_scores = sklearn.model_selection.cross_val_score(
clf, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs,
fit_params=fit_params)
score_desc = ("{0:.5f} (+/-{1:.5f})").format(np.mean(test_scores), scipy.stats.sem(test_scores))
if not quiet: stop('done %s: %s' % (prefix, score_desc))
return (np.mean(test_scores), scipy.stats.sem(test_scores))
def score_classifier_vals(prop, vals, clf, X, y, n_iter=3):
results = []
for v in vals:
clf = sklearn.base.clone(clf)
target_clf = clf.base_classifier if hasattr(clf, 'base_classifier') else clf
setattr(target_clf, prop, v)
score = do_cv(clf, X, y, n_iter=n_iter, prefix='CV - prop[%s] val[%s]' % (prop, str(v)))
results.append({'prop': prop, 'v':v, 'score': score})
sorted_results = sorted(results, key=lambda r: r['score'][0], reverse=True)
best = {'prop': prop, 'value': sorted_results[0]['v'], 'score': sorted_results[0]['score']}
dbg('\n\n\n\n', best)
return sorted_results
def score_operations_on_cols(clf, X, y, columns, operations, operator, n_iter=5):
best = X.cv(clf, y, n_iter=n_iter)
if not cfg['scoring_higher_better']: best *= -1
results = []
for c in columns:
if c not in X: continue
col_best = best
col_best_op = 'no-op'
for op in operations:
X2 = operator(X.copy(), c, op)
score = X2.cv(clf, y, n_iter=n_iter)
if not cfg['scoring_higher_better']: score *= -1
if score[0] < col_best[0]:
col_best = score
col_best_op = str(op)
r = {'column': c, 'best': col_best_op, 'score': col_best[0], 'improvement': best[0] - col_best[0]}
results.append(r)
dbg(r)
return results
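# Sketch of how the arguments fit together (the names below are placeholders,
# not part of this module): 'operations' are candidate transforms and 'operator'
# applies one of them to a single column, returning the modified frame, e.g.
#   ops = [np.log1p, np.sqrt]
#   def apply_op(X, c, op): X[c] = op(X[c]); return X
#   results = score_operations_on_cols(clf, X, y, X.columns, ops, apply_op)
# Each column keeps the transform (or 'no-op') with the best CV score.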
def do_gs(clf, X, y, params, n_samples=1.0, n_iter=3,
n_jobs=-2, scoring=None, fit_params=None,
random_iterations=None):
start('starting grid search')
if type(n_samples) is float: n_samples = int(len(y) * n_samples)
reseed(clf)
cv = sklearn.model_selection.ShuffleSplit(n_samples, n_iter=n_iter, random_state=cfg['sys_seed'])
if random_iterations is None:
gs = sklearn.model_selection.GridSearchCV(clf, params, cv=cv,
n_jobs=n_jobs, verbose=2, scoring=scoring or cfg['scoring'], fit_params=fit_params)
else:
gs = sklearn.model_selection.RandomizedSearchCV(clf, params, random_iterations, cv=cv,
n_jobs=n_jobs, verbose=2, scoring=scoring or cfg['scoring'],
fit_params=fit_params, refit=False)
X2, y2 = sklearn.utils.shuffle(X, y, random_state=cfg['sys_seed'])
gs.fit(X2[:n_samples], y2[:n_samples])
stop('done grid search')
dbg(gs.best_params_, gs.best_score_)
return gs
def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()
def load_array(fname, opt_fallback=None):
if not os.path.isdir(fname):
arr = opt_fallback()
if hasattr(arr, 'values'): arr = arr.values
save_array(fname, arr)
return arr
return bcolz.open(fname)[:]
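# Typical caching pattern (names are illustrative): the fallback is only run
# when the bcolz directory does not exist yet, otherwise the cached copy loads.
#   X = load_array('data/X_features.bc', lambda: build_features(raw_df))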
def dump(file, data, force=False):
if file.endswith('.pickle.gz'):
if os.path.isfile(file) and not force: raise Exception('file: ' + file + ' already exists. Set force=True to overwrite.')
with gzip.open(file, 'wb') as f: # c
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
return
if not '/' in file:
if not os.path.isdir('data/pickles'): os.makedirs('data/pickles')
file = 'data/pickles/' + file
if not '.' in file: file += '.pickle'
if os.path.isfile(file) and not force: raise Exception('file: ' + file + ' already exists. Set force=True to overwrite.')
sklearn.externals.joblib.dump(data, file);
def load(file, opt_fallback=None, fail_if_missing=False):
start('loading file: ' + file)
if not '/' in file: file = 'data/pickles/' + file
if not '.' in file: file += '.pickle'
if not os.path.isfile(file):
if opt_fallback is None:
if fail_if_missing: raise Exception('could not find the file: %s' % file)
return None
data = opt_fallback()
dump(file, data)
return data
if file.endswith('.pickle.gz'):
with gzip.open(file,'rb') as f: return pickle.load(f)
if file.endswith('.npy'): return np.load(file)
else: return sklearn.externals.joblib.load(file);
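# Round-trip sketch (placeholder names): with no directory in the name both
# helpers resolve to data/pickles/<name>.pickle, so dump('model', clf) followed
# by load('model') restores the same object, and load('model', lambda: train())
# trains and caches only when the pickle is missing.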
def read_df(file, nrows=None, sheetname=None, header=0):
start('reading dataframe 2: ' + file)
if file.endswith('.pickle'):
df = load(file)
else:
sep = '\t' if '.tsv' in file else ','
if file.endswith('.xls') or file.endswith('.xlsx'):
skip_footer = 0
if nrows is not None:
xl = pd.ExcelFile(file)
total_rows = xl.book.sheet_by_index(0).nrows
if sheetname is not None: total_rows = xl.book.sheet_by_name(sheetname).nrows
skip_footer = total_rows - nrows
df = pd.read_excel(file, sheetname=sheetname, skip_footer=skip_footer, header=header);
elif file.endswith('.7z'):
import libarchive
with libarchive.reader(file) as reader:
df = pd.read_csv(reader, nrows=nrows, sep=sep);
elif file.endswith('.zip'):
import zipfile
zf = zipfile.ZipFile(file)
if len(zf.filelist) != 1: raise Exception('zip files with multiple files not supported')
with zf.open(zf.filelist[0].filename) as reader:
df = pd.read_csv(reader, nrows=nrows, sep=sep)
import sys, os
sys.path.append("../ern/")
sys.path.append("../..dies/dies/")
sys.path.append(os.path.expanduser("~/workspace/prophesy_code/"))
import pandas as pd
import numpy as np
import glob, argparse, copy, tqdm
from ern.shift_features import ShiftFeatures
from ern.utils import to_short_name
import pathlib
from ern.utils_data import (
create_consistent_number_of_sampler_per_day,
get_data_with_intersected_timestamps,
)
import prophesy
from prophesy.utils.utils import get_blacklist
def get_df(file):
df = pd.read_csv(file, sep=";")
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2-D matrix with shape (2, 3) used as input. empty: factory that
# creates array-like objects of the requested shape
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]])
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2016-11-16 16:23:55
# @Last Modified by: <NAME>
# @Last Modified time: 2017-01-17 15:11:17
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import glob
import os
pd.set_option("display.width", None)
# v4
# including branching timings in more detail
# ... but now measured cleanly
def df_from_sloppy_json_list_file(fn):
with open(fn, 'r') as fh:
json_array_inside_text = fh.read()
json_array_text = "[" + json_array_inside_text[:-2] + "]" # :-1 removes ,\n
df = pd.read_json(json_array_text)
return df
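# A minimal sketch of the "sloppy JSON list" format this helper assumes: one JSON
# object per line, each line ending in ",\n" and with no enclosing brackets, so the
# [:-2] slice plus the added "[" and "]" turn it into a valid JSON array. The file
# name and fields below are hypothetical, for illustration only.
#
#   $ cat timing_full_minimize.json
#   {"pid": 101, "full_minimize_wall_s": 12.3},
#   {"pid": 102, "full_minimize_wall_s": 11.9},
#
#   df = df_from_sloppy_json_list_file('timing_full_minimize.json')
#   # -> DataFrame with columns 'pid' and 'full_minimize_wall_s'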
def merge_dataframes(*dataframes):
return reduce(pd.merge, dataframes)
def concat_dataframes(*dataframes):
return reduce(pd.concat, dataframes)
def df_from_json_incl_meta(fn, fn_meta=None,
drop_meta=['N_gaussians', 'N_observables',
'N_parameters', 'parallel_interleave',
'seed'],
drop_nan=False):
if fn_meta is None:
fn_meta = os.path.join(os.path.dirname(fn), 'timing_meta.json')
main_df = df_from_sloppy_json_list_file(fn)
meta_df = df_from_sloppy_json_list_file(fn_meta).drop(drop_meta, axis=1)
# not just single process runs, also master processes in multi-process runs:
single_process = pd.merge(main_df, meta_df, how='left', on='pid')
if 'ppid' in main_df.columns:
single_process = single_process.drop('ppid', axis=1)
multi_process = pd.merge(main_df, meta_df, how='left',
left_on='ppid', right_on='pid').drop('pid_y', axis=1)
multi_process.rename(columns={'pid_x': 'pid'}, inplace=True)
result = [single_process, multi_process]
else:
result = [single_process]
if drop_nan:
result = [df.dropna() for df in result]
return result
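# Hedged usage sketch (the path below is hypothetical): the function returns a list
# of one or two DataFrames, depending on whether the timing file carries a 'ppid'
# column, i.e. whether worker processes reported through a parent pid.
#
#   dfs = df_from_json_incl_meta('some_run.allier.nikhef.nl/timing_full_minimize.json')
#   sp_df = dfs[0]                             # timings merged with meta data on pid
#   mp_df = dfs[1] if len(dfs) > 1 else None   # worker timings merged on ppid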
"""
cd ~/projects/apcocsm/code/scaling
rsync -av --progress nikhef:"/user/pbos/project_atlas/apcocsm_code/scaling/*.allier.nikhef.nl" ./
"""
savefig_dn = '/home/patrick/projects/apcocsm/code/scaling/unbinned_scaling_4/analysis/'
#### LOAD DATA FROM FILES
# dnlist = sorted(glob.glob("unbinned_scaling_4_orig/17510*.allier.nikhef.nl")) # run_unbinned_scaling_3.sh
# dnlist = sorted(glob.glob("unbinned_scaling_4/*.allier.nikhef.nl")) # run_unbinned_scaling_4.sh
dnlist = sorted(glob.glob("unbinned_scaling_4/17513*.allier.nikhef.nl")) # run_unbinned_scaling_4.sh after later additional runs
dnlist = [dn for dn in dnlist if len(glob.glob(dn + '/*.json')) > 1]
fnlist = reduce(lambda x, y: x + y, [glob.glob(dn + '/*.json') for dn in dnlist])
fnlist = [fn for fn in fnlist if 'timing_meta.json' not in fn]
uniquefns = np.unique([fn.split('/')[-1] for fn in fnlist]).tolist()
dfkeys = [u[7:-5] for u in uniquefns]
dfs_split = {fn: df_from_json_incl_meta(fn) for fn in fnlist}
dfs_split_sp = {fn: dflist[0] for fn, dflist in dfs_split.iteritems()}
dfs_split_mp = {fn: dflist[1] for fn, dflist in dfs_split.iteritems() if len(dflist) > 1}
dfs_sp = {k: pd.concat([df for fn, df in dfs_split_sp.iteritems() if k in fn]) for k in dfkeys}
dfs_mp = {k: pd.concat([df for fn, df in dfs_split_mp.iteritems() if k in fn]) for k in dfkeys if k in "".join(dfs_split_mp.keys())}
#### AGGREGATE AND ANNOTATE ROWS AND RENAME COLUMNS FOR EASY ANALYSIS
#### refactor and combine MPFE_evaluate_client timings
mpfe_eval_wall = dfs_sp['wall_RRMPFE_evaluate_client']
mpfe_eval_cpu = dfs_sp['cpu_RRMPFE_evaluate_client']
# add after_retrieve columns
mpfe_eval_wall['RRMPFE_evaluate_client_after_retrieve_wall_s'] = mpfe_eval_wall['RRMPFE_evaluate_client_wall_s'] - mpfe_eval_wall['RRMPFE_evaluate_client_before_retrieve_wall_s'] - mpfe_eval_wall['RRMPFE_evaluate_client_retrieve_wall_s']
mpfe_eval_cpu['RRMPFE_evaluate_client_after_retrieve_cpu_s'] = mpfe_eval_cpu['RRMPFE_evaluate_client_cpu_s'] - mpfe_eval_cpu['RRMPFE_evaluate_client_before_retrieve_cpu_s'] - mpfe_eval_cpu['RRMPFE_evaluate_client_retrieve_cpu_s']
# refactor for nice factorplotting; rename columns and add timing type column
# ... cpu/wall column
mpfe_eval_wall['cpu/wall'] = 'wall'
mpfe_eval_cpu['cpu/wall'] = 'cpu'
# ... give each timing column its own row
mpfe_eval = pd.DataFrame(columns=['pid', 'N_events', 'num_cpu', 'time s', 'cpu/wall', 'segment'])
cols_base = ['pid', 'N_events', 'num_cpu', 'cpu/wall']
cols_wall = [('all', 'RRMPFE_evaluate_client_wall_s'),
('before_retrieve', 'RRMPFE_evaluate_client_before_retrieve_wall_s'),
('retrieve', 'RRMPFE_evaluate_client_retrieve_wall_s'),
('after_retrieve', 'RRMPFE_evaluate_client_after_retrieve_wall_s')]
cols_cpu = [('all', 'RRMPFE_evaluate_client_cpu_s'),
('before_retrieve', 'RRMPFE_evaluate_client_before_retrieve_cpu_s'),
('retrieve', 'RRMPFE_evaluate_client_retrieve_cpu_s'),
('after_retrieve', 'RRMPFE_evaluate_client_after_retrieve_cpu_s')]
for segment_id, col in cols_wall:
segment_timings = mpfe_eval_wall[cols_base + [col]].copy()
segment_timings['segment'] = segment_id
segment_timings.rename(columns={col: 'time s'}, inplace=True)
mpfe_eval = mpfe_eval.append(segment_timings, ignore_index=True)
for segment_id, col in cols_cpu:
segment_timings = mpfe_eval_cpu[cols_base + [col]].copy()
segment_timings['segment'] = segment_id
segment_timings.rename(columns={col: 'time s'}, inplace=True)
mpfe_eval = mpfe_eval.append(segment_timings, ignore_index=True)
# correct types
mpfe_eval.N_events = mpfe_eval.N_events.astype(np.int)
mpfe_eval.num_cpu = mpfe_eval.num_cpu.astype(np.int)
mpfe_eval.pid = mpfe_eval.pid.astype(np.int)
#### add MPFE evaluate full timings
mpfe_eval_full = dfs_sp['RRMPFE_evaluate_full']
mpfe_eval_full.rename(columns={'RRMPFE_evaluate_wall_s': 'time s'}, inplace=True)
mpfe_eval_full['cpu/wall'] = 'wall+INLINE'
mpfe_eval_full['segment'] = 'all'
mpfe_eval = mpfe_eval.append(mpfe_eval_full)
#### total time per run (== per pid, but the other columns are also grouped-by to prevent from summing over them)
mpfe_eval_total = mpfe_eval.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment'], as_index=False).sum()
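# Minimal illustration of the grouping trick above (toy data, not from the runs):
# putting per-pid constants such as N_events into the group keys keeps them as columns
# in the aggregated frame instead of summing them away.
#
#   toy = pd.DataFrame({'pid': [1, 1, 2], 'N_events': [10, 10, 20], 'time s': [0.1, 0.2, 0.3]})
#   toy.groupby(['pid', 'N_events'], as_index=False).sum()
#   #    pid  N_events  time s
#   # 0    1        10     0.3
#   # 1    2        20     0.3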
#### MPFE calculate
mpfe_calc = dfs_sp['RRMPFE_calculate_client']
mpfe_calc.rename(columns={'RRMPFE_calculate_client_wall_s': 'walltime s'}, inplace=True)
mpfe_calc_total = mpfe_calc.groupby(['pid', 'N_events', 'num_cpu'], as_index=False).sum()
#### full minimize
df_totals = dfs_sp['full_minimize']
### ADD IDEAL TIMING BASED ON SINGLE CORE RUNS
single_core = df_totals[df_totals.num_cpu == 1]
# estimate ideal curve from fastest single_core run:
single_core_fastest = single_core.groupby('N_events', as_index=False).min()
df_ideal = single_core_fastest.copy()
for num_cpu in df_totals.num_cpu.unique():
if num_cpu != 1:
ideal_num_cpu_i = single_core_fastest.copy()
ideal_num_cpu_i.full_minimize_wall_s /= num_cpu
ideal_num_cpu_i.num_cpu = num_cpu
df_ideal = df_ideal.append(ideal_num_cpu_i)
df_totals['timing_type'] = pd.Series(len(df_totals) * ('real',), index=df_totals.index)
df_ideal['timing_type'] = pd.Series(len(df_ideal) * ('ideal',), index=df_ideal.index)
df_totals = df_totals.append(df_ideal)
# add combination of two categories
df_totals['N_events/timing_type'] = df_totals.N_events.astype(str) + '/' + df_totals.timing_type.astype(str)
#### RATS evaluate full
rats_eval_sp = dfs_sp['RATS_evaluate_full'].dropna()
rats_eval_mp = dfs_mp['RATS_evaluate_full'].dropna()
rats_eval_sp_total = rats_eval_sp.groupby(['pid', 'N_events', 'num_cpu', 'mode'], as_index=False).sum()
rats_eval_mp_total = rats_eval_mp.groupby(['ppid', 'N_events', 'num_cpu', 'mode'], as_index=False).sum()\
.drop('pid', axis=1)
rats_eval_mp_by_ppid = rats_eval_mp.groupby(['pid', 'ppid', 'N_events', 'num_cpu', 'mode'], as_index=False)\
.sum()\
.groupby('ppid')
rats_eval_mp_maxppid = rats_eval_mp_by_ppid.max()\
.rename(columns={'RATS_evaluate_wall_s': 'ppid-max wall s'})
rats_eval_mp_minppid = rats_eval_mp_by_ppid.min()\
.rename(columns={'RATS_evaluate_wall_s': 'ppid-min wall s'})
#### RATS evaluate per CPU iteration
rats_eval_itcpu_sp = dfs_sp['RATS_evaluate_mpmaster_perCPU']
rats_eval_itcpu_mp = dfs_mp['RATS_evaluate_mpmaster_perCPU']
rats_eval_itcpu_sp['sp/mp'] = 'sp'
rats_eval_itcpu_mp['sp/mp'] = 'mp'
## dataframe with the individual itX timings, one per row, plus a class column holding the itX number X
rats_eval_itcpu = pd.DataFrame(columns=['pid', 'N_events', 'num_cpu', 'walltime s', 'it_nr', 'sp/mp'])
itX_cols = [(ix, 'RATS_evaluate_mpmaster_it%i_wall_s' % ix)
for ix in range(max(rats_eval_itcpu_sp.num_cpu))]
cols_base = ['pid', 'N_events', 'num_cpu', 'sp/mp']
for X, col in itX_cols:
itX_timings = rats_eval_itcpu_sp[cols_base + [col]].copy().dropna()
itX_timings['it_nr'] = X
itX_timings.rename(columns={col: 'walltime s'}, inplace=True)
rats_eval_itcpu = rats_eval_itcpu.append(itX_timings, ignore_index=True)
itX_timings = rats_eval_itcpu_mp[cols_base + [col]].copy().dropna()
itX_timings['it_nr'] = X
itX_timings.rename(columns={col: 'walltime s'}, inplace=True)
rats_eval_itcpu = rats_eval_itcpu.append(itX_timings, ignore_index=True)
# correct types
rats_eval_itcpu.pid = rats_eval_itcpu.pid.astype(np.int)
rats_eval_itcpu.N_events = rats_eval_itcpu.N_events.astype(np.int)
rats_eval_itcpu.num_cpu = rats_eval_itcpu.num_cpu.astype(np.int)
rats_eval_itcpu.it_nr = rats_eval_itcpu.it_nr.astype(np.int)
rats_eval_itcpu_total = rats_eval_itcpu.groupby(['pid', 'N_events', 'num_cpu', 'it_nr', 'sp/mp'], as_index=False).sum()
#### ADD mpfe_eval COLUMN OF CPU_ID, ***PROBABLY***, WHICH SEEMS TO EXPLAIN DIFFERENT TIMINGS QUITE WELL
mpfe_eval_cpu_split = pd.DataFrame(columns=mpfe_eval.columns)
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:21:16 2018
@author: Chathuranga_08290
"""
# Importing the libraries
import tensorflow as tf # module for deep learning
import numpy as np # module for numerical calculations + linear algebra
import pandas as pd # module for data processing
import matplotlib.pyplot as plt # module for plotting
import datetime as dt # module for manipulating dates and times
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from scipy import stats
import sklearn.neighbors
# Data loading
data_path ='D:\Datasets\drivendata\\'
metadata = pd.read_csv(data_path+'metadata.csv')
metadata_df = pd.DataFrame(metadata)
#metadata_df.head()
training_data = pd.read_csv(data_path+'train.csv')
train_df = pd.DataFrame(training_data)
#train_df.head()
submission_frequency = pd.read_csv(data_path+'submission_frequency.csv')
submission_frequency_df = pd.DataFrame(submission_frequency)
submission_frequency_df['ForecastPeriodMin'] = submission_frequency_df['ForecastPeriodNS']/(1000000000*60)
del submission_frequency_df['ForecastPeriodNS']
#submission_frequency_df.head()
holiday_data = pd.read_csv(data_path+'holidays.csv')
holiday_df = pd.DataFrame(holiday_data)
holiday_df['Date'] = pd.to_datetime(holiday_df['Date'], format='%Y-%m-%d')
#print(type(holiday_df['Date'][0]))
#holiday_df.head()
result_df = pd.merge(train_df, metadata_df, on='SiteId')
result_df = pd.merge(result_df, submission_frequency_df, on='ForecastId')
# -*- coding: utf-8 -*-
"""Demo39_Pandas.ipynb
# MUNG - FU PANDA
Welcome to the Pandas tutorial. Pandas is an excellent tool for data wrangling, also known as data munging.
Data munging refers to cleaning and preparing data from its raw format into a format suitable for analysis.
- Python Basics
- Object Oriented Python
- Python for Data Science
- NumPy
- **Pandas**
- Plotting
- Matplotlib
- Seaborn
Let's get coding !
### SERIES AND DATAFRAMES
Series and dataframes are the main data types Pandas introduces.
"""
import numpy as np
import pandas as pd
Student_ID = list(range(10,20))
Grades = ['A','B','A','A','F','C','F','F','D','A']
arr_Grades = np.array(Grades)
print(pd.Series(data = Grades))
print(pd.Series(data = Grades, index = Student_ID))
print(pd.Series(arr_Grades))
d = {'Pakistan':11, 'Germany':4, 'Brazil':5, 'Argentina':6}
S = pd.Series(d)
print(S['Pakistan'])
print(S + S)
print(S-S)
print(S**S)
arr = np.random.randint(1,10,5)
df = pd.DataFrame(arr, ['A','B','C','D','E'])
print(df)
df = pd.DataFrame(np.random.randint(1,10,(5,3)), ['A','B','C','D','E'],['Q','W','E'])
print(df)
print(df['W'])
print(df.W)
print(type(df.W))
print(type(df['W']))
print(df[['Q','W']])
df['New Column'] = 0
print(df)
df.drop('New Column', axis = 1, inplace = True)
print(df)
print(df.loc['C'])
print(type(df.loc['C']))
print(df.iloc[2])
print(type(df.iloc[2]))
print(df.iloc[1:4,1:3])
print(df.loc[['A','D'],['Q','E']])
print(df>5)
print(df[df>5])
print(df[df['Q']>5])
print(df[(df['Q']>5) & (df['E']>5)])
print(df[(df['Q']>5) | (df['E']>5)])
df = df.reset_index()
print(df)
print(df['index'])
print(df.set_index('index'))
"""### DATA MANIPULATIONS"""
col1 = [1,2,np.nan,np.nan,5]
col2 = [5,6,7,np.nan,8]
col3 = [12,np.nan,13,14,15]
d = {'A':col1, 'B':col2, 'C':col3}
df = pd.DataFrame(d)
print(df)
print(df.dropna())
print(df.dropna(thresh=2))
print(df.fillna(value="VALUE"))
print(df['A'].fillna(value=df['A'].mean()))
"""### MERGE
Merging refers to a "Glue-ing" technique that does not care about the index.
"""
left = pd.DataFrame({'Key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'Key': ['K0', 'K1', 'K2', 'K4'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
print(left)
print(right)
#Inner join only looks at the intersection
print(pd.merge(left, right, on="Key", how="inner"))
#Left join gives us the left df + the intersection
print(pd.merge(left, right, on="Key", how="left"))
#Right join gives us the right df + the intersection
print(pd.merge(left, right, on="Key", how="right"))
#Outer join gives us the right df + the intersection + the left df
print(pd.merge(left, right, on="Key", how="outer"))
"""### JOIN
Joining refers to a "Glue-ing" technique that does care about the index
"""
left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']},
index = ['K0', 'K1', 'K2', 'K3'])
right = pd.DataFrame({'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index = ['K0', 'K1', 'K2', 'K4'],)
print(left)
print(right)
print(left.join(right))
print(left.join(right, how="inner"))
print(left.join(right, how="outer"))
print(right.join(left))
"""### CONCATENATE"""
df1 = pd.DataFrame({'C0': ['00', '10','20', '30'],
'C1': ['01', '11', '21', '31'],
'C2': ['02', '12', '22', '32'],
'C3': ['03', '13', '23', '33']},
index=[0, 1, 2, 3])
print(df1)
df2 = pd.DataFrame({'C0': ['40', '50','60', '70'],
'C1': ['41', '51', '61', '71'],
'C2': ['42', '52', '62', '72'],
'C3': ['43', '53', '63', '73']},
index=[4, 5, 6, 7])
print(df2)
df3 = pd.DataFrame({'C0': ['80', '90','10,0', '11,0'],
'C1': ['81', '91', '10,1', '11,1'],
'C2': ['82', '92', '10,2', '11,2'],
'C3': ['83', '93', '10,3', '11,3']},
index=[8, 9, 10, 11])
print(df3)
print(pd.concat([df1,df2,df3]))
print(pd.concat([df1,df2,df3], axis=1))
"""### GROUPBY"""
import pandas as pd
school = ['SEECS','SEECS','SMME','SEECS','SCME','SMME','SADA']
student = ['Mahnoor','Usman','Mustafa','Abdullah','Mahum','Armughan','Ayesha']
cgpa = [3.12,4,3.17,4,3.14,3.04,3.04]
data = {'School':school, 'Student':student,'CGPA':cgpa}
df = pd.DataFrame(data)
print(df)
bySchool = df.groupby('School')
bySchool
print(bySchool.mean())
print(bySchool.std())
print(bySchool.sum())
print(df.groupby('School').std().loc['SMME'])
print(df.groupby('School').describe())
"""### OPERATIONS IN PANDAS"""
school = ['SEECS','SEECS','SMME','SMME','SCME','SMME','SADA']
student = ['Mahnoor','Usman','Mustafa','Abdullah','Mahum','Armughan','Ayesha']
cgpa = [3.12,4,4,3.13,3.14,3.04,3.04]
age = [21,18,22,21,20,21,24]
df= pd.DataFrame({'Student':student, 'School':school, "CGPA":cgpa, 'Age':age})
print(df)
print(df[(df['School']=='SEECS') & (df['CGPA']==4)])
(df['School'].value_counts())
print(df.sort_values(by='CGPA'))
print(df['School'].unique())
print(df['School'].nunique())
print(df.drop('School', axis=1))
print(df.columns)
print(df.index)
print(df.isnull())
"""### APPLYING CUSTOM FUNCTIONS"""
def square(x): return x*x
# lambda x : x*x
print(df.CGPA.apply(square))
print(df.CGPA.apply(lambda x:x*x))
"""### DATA I/O
You need to install sqlalchemy and lxml for this section of the tutorial
"""
import pandas as pd
df = pd.read_csv('../Datasets/CustomerList.csv')
print(df[1:10])
df.to_csv('Output',index=False)
df = pd.read_excel('../Datasets/Churn-Modelling.xlsx')
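# The sqlalchemy/lxml note above refers to SQL and HTML I/O, which the notebook stops
# short of demonstrating. A minimal sketch using an in-memory SQLite database (the
# table name is hypothetical):
#
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite:///:memory:')
#   df.to_sql('customers', engine, index=False)
#   df_sql = pd.read_sql('customers', con=engine)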
import pandas as pd
from sqlalchemy import create_engine
from library import cf
import talib.abstract as ta
import pymysql.cursors
import numpy as np
from library.logging_pack import *
logger.debug("subindex시작!!!!")
pymysql.install_as_MySQLdb()
daily_craw_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_craw",
encoding='utf-8')
daily_buy_list_engine = create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_buy_list" ,
encoding='utf-8')
simul_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/simulator11",
encoding='utf-8')
min_craw_engine = create_engine("mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/min_craw",
encoding='utf-8')
stand_date = '20070903'
# Data transformation
class subindex:
def __init__(self):
logger.debug("subindex 함수로 들어왔다!!")
def collecting(self):
co_sql = f"select TABLE_NAME FROM information_schema.tables WHERE table_schema = 'daily_craw'"
target_code = daily_craw_engine.execute(co_sql).fetchall()
num = len(target_code)
for i in range(num):
self.db_name = target_code[i][0]
self.db_name = self.db_name.replace("%", "%%")
self.collect_db()
print(self.db_name , "을 가져온다!")
def collect_db(self):
# Load the data
sql = "select date,code,vol10,code_name,open,close,low,high,volume from daily_craw.`%s` where Date >= %s order by Date "
rows = daily_craw_engine.execute(sql%(self.db_name,stand_date)).fetchall()
three_s = pd.DataFrame(rows, columns=['date', 'code','vol10' ,'code_name','open' ,'close', 'low', 'high', 'volume'])
three_s = three_s.fillna(0)
# Convert the data
th_date = list(np.asarray(three_s['date'].tolist()))
th_date_np = np.array(th_date, dtype='f8')
th_close = list(np.asarray(three_s['close'].tolist()))
th_close_np = np.array(th_close, dtype='f8')
th_high = list(np.asarray(three_s['high'].tolist()))
th_high_np = np.array(th_high, dtype='f8')
th_low = list(np.asarray(three_s['low'].tolist()))
th_low_np = np.array(th_low, dtype='f8')
th_volume = list(np.asarray(three_s['volume'].tolist()))
th_volume_np = np.array(th_volume, dtype='f8')
th_open = list(np.asarray(three_s['open'].tolist()))
th_open_np = np.array(th_open, dtype='f8')
th_vol10 = list(np.asarray(three_s['vol10'].tolist()))
th_vol10_np = np.array(th_vol10, dtype='f8')
# Price range between the daily high and low (%)
th_diff =((three_s['high']-three_s['low'])/three_s['high'])*100
# Largest high-low range over a 30-day window
th_diff30 = th_diff.rolling(30).max()
# Compute technical indicators
th_cci = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 9)
th_cci60 = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 60)
##rsi
th_rsi = ta._ta_lib.RSI(th_close_np, 14)
th_rsi5 = ta._ta_lib.RSI(th_close_np, 5)
th_OBV = ta._ta_lib.OBV(th_close_np, th_volume_np)
th_macd, th_macd_signal, th_macd_hist = ta._ta_lib.MACD(th_close_np, fastperiod=12, slowperiod=26,
signalperiod=9)
th_stoch_slowk, th_stoch_slowd = ta._ta_lib.STOCH(th_high_np, th_low_np, th_close_np,
fastk_period=10, slowk_period=2, slowk_matype=0,
slowd_period=2, slowd_matype=0)
## Changed to a 12-day basis following the book
th_BBAND_U, th_BBAND_M, th_BBAND_L = ta._ta_lib.BBANDS(th_close_np, timeperiod=20, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_U14, th_BBAND_M14, th_BBAND_L14 = ta._ta_lib.BBANDS(th_close_np, timeperiod=14, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_WIDE = (th_BBAND_U-th_BBAND_L)/th_BBAND_M
th_BBAND_WIDE14 = (th_BBAND_U14 - th_BBAND_L14) / th_BBAND_M14
th_pb=(th_close_np-th_BBAND_L) / (th_BBAND_U-th_BBAND_L)
th_pb14 = (th_close_np - th_BBAND_L14) / (th_BBAND_U14 - th_BBAND_L14)
th_sar = ta._ta_lib.SAR(th_high_np, th_low_np,0.04,0.4)
th_ibs = (th_close_np -th_low_np)/(th_high_np-th_low_np)
th_dema5 = ta._ta_lib.DEMA(th_close_np, 5)
th_dema20 = ta._ta_lib.DEMA(th_close_np,20)
th_dema60 = ta._ta_lib.DEMA(th_close_np, 60)
th_tema5 = ta._ta_lib.TEMA(th_close_np,5)
th_tema20 = ta._ta_lib.TEMA(th_close_np, 20)
th_tema60 = ta._ta_lib.TEMA(th_close_np, 60)
# ema = exponential moving average
th_ema5 = ta._ta_lib.EMA(th_close_np, 5)
th_ema20 = ta._ta_lib.EMA(th_close_np, 20)
th_ema60 = ta._ta_lib.EMA(th_close_np, 60)
th_ema112 = ta._ta_lib.EMA(th_close_np, 112)
th_ema224 = ta._ta_lib.EMA(th_close_np, 224)
th_ema448 = ta._ta_lib.EMA(th_close_np, 448)
th_ema448diff = ((th_close_np-th_ema448)/th_close_np * 100)
th_ema224diff = ((th_close_np-th_ema224)/th_close_np*100)
th_ema112diff = ((th_close_np-th_ema112)/th_close_np*100)
# ma = simple moving average
th_ma112 = ta._ta_lib.MA(th_close_np, 112)
th_ma224 = ta._ta_lib.MA(th_close_np, 224)
th_ma448 = ta._ta_lib.MA(th_close_np, 448)
th_clo5diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 5)) / th_close_np * 100)
th_clo20diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 20)) / th_close_np * 100)
# DMI values: period changed from 14 to 11
th_pdi = ta._ta_lib.PLUS_DI(th_high_np,th_low_np,th_close_np, 11)
th_mdi = ta._ta_lib.MINUS_DI(th_high_np, th_low_np, th_close_np, 11)
th_dm = ta._ta_lib.PLUS_DM(th_high_np,th_low_np, 11)
th_adx = ta._ta_lib.ADX(th_high_np,th_low_np,th_close_np, 14)
th_adxr = ta._ta_lib.ADXR(th_high_np, th_low_np, th_close_np, 14)
th_obvsig9 =ta._ta_lib.MA(ta._ta_lib.OBV(th_close_np, th_volume_np),9)
# Williams %R
th_williumr = ta._ta_lib.WILLR(th_high_np,th_low_np,th_close_np, 14)
th_mfi = ta._ta_lib.MFI(th_high_np,th_low_np,th_close_np,th_volume_np, 14)
# Volume oscillator formula, 10-day
th_ad = ((th_close_np-th_open_np)/(th_high_np-th_low_np) * th_volume_np / th_vol10_np*10)
# Intraday intensity
th_ll = (2*th_close_np-th_high_np-th_low_np)/(th_high_np-th_low_np) * th_volume_np
# Convert all NaN values to 0
np.nan_to_num(th_cci, copy=False)
np.nan_to_num(th_cci60, copy=False)
np.nan_to_num(th_rsi, copy=False)
np.nan_to_num(th_macd, copy=False)
np.nan_to_num(th_macd_signal, copy=False)
np.nan_to_num(th_macd_hist, copy=False)
np.nan_to_num(th_stoch_slowk, copy=False)
np.nan_to_num(th_stoch_slowd, copy=False)
np.nan_to_num(th_BBAND_L, copy=False)
np.nan_to_num(th_BBAND_M, copy=False)
np.nan_to_num(th_BBAND_U, copy=False)
np.nan_to_num(th_BBAND_L14, copy=False)
np.nan_to_num(th_BBAND_M14, copy=False)
np.nan_to_num(th_BBAND_U14, copy=False)
np.nan_to_num(th_OBV, copy=False)
np.nan_to_num(th_sar, copy=False)
np.nan_to_num(th_dema5, copy=False)
np.nan_to_num(th_dema20, copy=False)
np.nan_to_num(th_dema60, copy=False)
np.nan_to_num(th_tema5, copy=False)
np.nan_to_num(th_tema20, copy=False)
np.nan_to_num(th_tema60, copy=False)
np.nan_to_num(th_ema5, copy=False)
np.nan_to_num(th_ema112diff, copy=False)
np.nan_to_num(th_ema224diff, copy=False)
np.nan_to_num(th_ema448diff, copy=False)
np.nan_to_num(th_ema20, copy=False)
np.nan_to_num(th_ema60, copy=False)
np.nan_to_num(th_ema112, copy=False)
np.nan_to_num(th_ema224, copy=False)
np.nan_to_num(th_ema448, copy=False)
np.nan_to_num(th_ma112, copy=False)
np.nan_to_num(th_ma224, copy=False)
np.nan_to_num(th_ma448, copy=False)
np.nan_to_num(th_pdi, copy=False)
np.nan_to_num(th_mdi, copy=False)
np.nan_to_num(th_dm, copy=False)
np.nan_to_num(th_adx, copy=False)
np.nan_to_num(th_adxr, copy=False)
np.nan_to_num(th_williumr, copy=False)
np.nan_to_num(th_pb, copy=False)
np.nan_to_num(th_pb14, copy=False)
np.nan_to_num(th_BBAND_WIDE, copy=False)
np.nan_to_num(th_BBAND_WIDE14, copy=False)
np.nan_to_num(th_mfi, copy=False)
np.nan_to_num(th_ll, copy=False)
np.nan_to_num(th_ad, copy=False)
np.nan_to_num(th_rsi5, copy=False)
np.nan_to_num(th_ibs, copy=False)
np.nan_to_num(th_diff, copy=False)
np.nan_to_num(th_diff30, copy=False)
np.nan_to_num(th_obvsig9, copy=False)
# Build DataFrames
df_ad = pd.DataFrame(th_ad, columns=['ad'])
df_cci = pd.DataFrame(th_cci, columns=['cci'])
df_cci60 = pd.DataFrame(th_cci60, columns=['cci60'])
df_rsi5 = pd.DataFrame(th_rsi5, columns=['rsi5'])
df_rsi = pd.DataFrame(th_rsi, columns=['rsi'])
df_macd = pd.DataFrame(th_macd, columns=['macd'])
df_macd_signal = pd.DataFrame(th_macd_signal, columns=['macd_signal'])
df_macd_hist = pd.DataFrame(th_macd_hist, columns=['macd_hist'])
df_stoch_slowk = pd.DataFrame(th_stoch_slowk, columns=['stoch_slowk'])
df_stoch_slowd = pd.DataFrame(th_stoch_slowd, columns=['stoch_slowd'])
# Bollinger Bands
df_BBand_U = pd.DataFrame(th_BBAND_U, columns=['BBand_U'])
df_BBand_M = pd.DataFrame(th_BBAND_M, columns=['BBand_M'])
df_BBand_L = pd.DataFrame(th_BBAND_L, columns=['BBand_L'])
df_BBand_U14 = pd.DataFrame(th_BBAND_U14, columns=['BBand_U14'])
df_BBand_M14 = pd.DataFrame(th_BBAND_M14, columns=['BBand_M14'])
df_BBand_L14 = pd.DataFrame(th_BBAND_L14, columns=['BBand_L14'])
df_ibs = pd.DataFrame(th_ibs, columns=['ibs'])
df_pb14 = pd.DataFrame(th_pb14, columns=['pb14'])
df_obvsig9 = pd.DataFrame(th_obvsig9, columns=['obvsig9'])
df_OBV = pd.DataFrame(th_OBV, columns=['OBV'])
df_sar = pd.DataFrame(th_sar, columns=['sar'])
# Double exponential moving average (DEMA)
df_dema5 = pd.DataFrame(th_dema5, columns=['dema5'])
df_dema20 = pd.DataFrame(th_dema20, columns=['dema20'])
df_dema60 = pd.DataFrame(th_dema60, columns=['dema60'])
# Triple exponential moving average (TEMA)
df_tema5 = pd.DataFrame(th_tema5, columns=['tema5'])
df_tema20 = pd.DataFrame(th_tema20, columns=['tema20'])
df_tema60 = pd.DataFrame(th_tema60, columns=['tema60'])
# Exponential moving averages (EMA)
df_ema5 = pd.DataFrame(th_ema5, columns=['ema5'])
df_ema112diff = pd.DataFrame(abs(th_ema112diff), columns=['ema112diff'])
df_ema224diff = pd.DataFrame(abs(th_ema224diff), columns=['ema224diff'])
df_ema448diff = pd.DataFrame(abs(th_ema448diff), columns=['ema448diff'])
df_ema20 = pd.DataFrame(th_ema20, columns=['ema20'])
df_ema60 = pd.DataFrame(th_ema60, columns=['ema60'])
df_ema112 = pd.DataFrame(th_ema112, columns=['ema112'])
df_ema224 = pd.DataFrame(th_ema224, columns=['ema224'])
df_ema448 = pd.DataFrame(th_ema448, columns=['ema448'])
df_ma112 = pd.DataFrame(th_ma112, columns=['ma112'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Helpers that call the Wind wset function.
Notes on the data-download strategy:
1. Bisecting over time downloads the least data, but once part of it has been downloaded, shifting the range by one position when back-filling forces a full re-download.
2. For files: treat three files as a group; while three groups in a row are identical, delete the middle one, until nothing more can be deleted, then stop.
"""
import os
import pandas as pd
from .utils import asDateTime
def download_sectorconstituent(w, date, sector, windcode, field='wind_code'):
"""
Sector constituents.
CITIC Securities level-1 industry indices: the date seems unnecessary, since calendar (non-trading) days are also returned.
Risk-warning (ST) stocks: the date is simply the query date and the stock names are the latest ones, so this is of little use.
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000;field=wind_code")
w.wset("sectorconstituent","date=2017-03-03;sectorid=a001010100000000")
w.wset("sectorconstituent","date=2017-03-03;windcode=000300.SH")
:param w:
:param sector:
:param date:
:return:
"""
param = 'date=%s' % date
if sector:
param += ';sector=%s' % sector
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("sectorconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
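# Hedged usage sketch (assumes an initialized WindPy session `w`; the sector id and
# dates are copied from the docstring examples above, not verified here):
#
#   df_sector = download_sectorconstituent(w, '2017-03-03', 'a001010100000000', None)
#   df_index = download_sectorconstituent(w, '2017-03-03', None, '000300.SH')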
def download_indexconstituent(w, date, windcode, field='wind_code,i_weight'):
"""
Index constituent weights.
If the given date is not a trading day, data for the previous trading day is returned.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'date=%s' % date
if windcode:
param += ';windcode=%s' % windcode
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("indexconstituent", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def download_optioncontractbasicinfo(w, exchange='sse', windcode='510050.SH', status='trading',
field='wind_code,trade_code,sec_name,contract_unit,listed_date,expire_date,reference_price'):
"""
Basic information for listed option contracts on the given exchange and underlying.
:param w:
:param windcode:
:param date:
:return:
"""
param = 'exchange=%s' % exchange
param += ';windcode=%s' % windcode
param += ';status=%s' % status
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optioncontractbasicinfo", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
def download_optionchain(w, date='2017-11-28', us_code='510050.SH',
field='option_code,option_name,strike_price,multiplier'):
"""
    Download the option chain for the given date and underlying.
    w_wset_data = vba_wset("optionchain","date=2017-11-28;us_code=510050.SH;option_var=全部;call_put=全部;field=option_code,option_name,strike_price,multiplier",)
    :param w:
    :param us_code:
    :param date:
:return:
"""
param = 'date=%s' % date
param += ';us_code=%s' % us_code
if field:
param += ';field=%s' % field
w.asDateTime = asDateTime
w_wset_data = w.wset("optionchain", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
return df
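# Illustrative usage sketch only: assumes an authenticated WindPy session ``w``. It simply
# chains the two option helpers above with their default 510050.SH underlying; the date is
# the one from the optionchain docstring example.
def example_download_option_data(w):
    contracts = download_optioncontractbasicinfo(w)       # contracts currently trading
    chain = download_optionchain(w, date='2017-11-28')    # option chain on that date
    return contracts, chain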
def read_constituent(path):
"""
    Read a single constituent CSV file from disk.
:param path:
:return:
"""
try:
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True)
    except Exception:
return None
try:
df['date'] = pd.to_datetime(df['date'])
except KeyError:
pass
return df
def read_sectorconstituent_from_dir(path, key_field='wind_code'):
"""
    Read every constituent file under a directory into one DataFrame.
:param path:
:param key_field:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
            # Adjacent files often hold identical data; keeping only the first of each
            # identical run speeds up processing.
curr_set = set(curr_df[key_field])
if last_set == curr_set:
last_set = curr_set
continue
last_set = curr_set
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
return df
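# Illustrative sketch only of the file-pruning idea from the module docstring: walk the
# date-named CSV files in order and, whenever three consecutive files hold exactly the same
# constituent set, delete the middle one, repeating until nothing more can be removed. This
# is a best-effort reading of that strategy, not the original implementation.
def example_prune_identical_files(path, key_field='wind_code'):
    changed = True
    while changed:
        changed = False
        files = sorted(os.path.join(path, f) for f in os.listdir(path) if f.endswith('.csv'))
        for a, b, c in zip(files, files[1:], files[2:]):
            frames = [read_constituent(p) for p in (a, b, c)]
            if any(f is None for f in frames):
                continue
            sets = [set(f[key_field]) for f in frames]
            if sets[0] == sets[1] == sets[2]:
                os.remove(b)  # the middle file adds no information
                changed = True
                break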
def write_constituent(path, df):
df.to_csv(path, encoding='utf-8-sig', date_format='%Y-%m-%d', index=False)
def read_indexconstituent_from_dir(path):
"""
    Because the weights differ every day, they can only be downloaded for the dates the user specifies.
:param path:
:return:
"""
last_set = None
df = None
for parent, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(parent, filename)
curr_df = read_constituent(filepath)
            # 2016-12-12: newly added constituents can carry NaN weights
curr_df.fillna(0, inplace=True)
data_date_str = filename[:-4]
curr_df['_datetime_'] = pd.to_datetime(data_date_str)
if df is None:
df = curr_df
else:
df = pd.concat([df, curr_df])
return df
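# Illustrative usage sketch only: download one day's index weights, persist them with
# write_constituent using the date as the file name (matching the filename[:-4] date
# convention used above), and read the whole directory back. Assumes ``w`` is an
# authenticated WindPy session and ``path`` is the directory holding the date-named CSVs.
def example_index_weights_roundtrip(w, path, date='2017-03-03', windcode='000300.SH'):
    df = download_indexconstituent(w, date=date, windcode=windcode)
    write_constituent(os.path.join(path, '%s.csv' % date), df)
    return read_indexconstituent_from_dir(path)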
def download_corporationaction(w, startdate, enddate, windcode):
"""
    Dividends, bonus shares and share transfers (corporate actions).
    To fetch a single day's events, set startdate and enddate to the same day and leave windcode unset.
    When querying a single code such as 600000.SH, very old records may be missing.
    > w.wset('CorporationAction','startdate=20150605;enddate=20150605')
    :param w:
    :param startdate:
    :param enddate:
    :param windcode:
:return:
"""
param = 'startdate=%s' % startdate
if enddate:
param += ';enddate=%s' % enddate
if windcode:
param += ';windcode=%s' % windcode
w.asDateTime = asDateTime
w_wset_data = w.wset("corporationaction", param)
df = pd.DataFrame(w_wset_data.Data)
df = df.T
df.columns = w_wset_data.Fields
    df['ex_dividend_date'] = pd.to_datetime(df['ex_dividend_date'])
    return df
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
        result = self.ymd.unstack(0).stack(-2)
        expected = self.ymd.unstack(0).stack(0)
        assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = | pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1') | pandas.PeriodIndex |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
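# Null masks on non-key columns must be preserved through partition_by_hash.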
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
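# keep_index decides whether the original index values follow the rows into
# each hash partition or are replaced by a default RangeIndex.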
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if dtype1 != dtype2 and ("datetime" in dtype1 or "datetime" in dtype2):
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() upcasts numerical columns to float but casts
    # nan to 0 in non-float numerical columns, so normalize them to float64
    # with NaN before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
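# axis=1 concatenation is exercised for frame+frame, series+series, and mixed
# inputs; axis=0 concatenation for MultiIndex-ed frames, groupby results, and
# series carrying a MultiIndex.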
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
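# DataFrames built directly from 2D cupy device arrays should match pandas
# construction from the equivalent host array, including explicit column
# names, explicit indexes, and set_index on a positional column label.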
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
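# Shared fixtures: a 10-row pandas frame with columns x and y, and its cudf
# counterpart, reused by many of the tests below.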
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
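# Arithmetic and comparison operators applied to whole frames and to series
# should match pandas elementwise; the +1.0 shift keeps the operands nonzero
# for division and modulo.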
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name is not stable across runs, which can
    # make enc_with_name_arr and enc_arr come out identical. An integer name
    # hashes to a constant value, keeping the two encodings distinguishable.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
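# Columns of a pandas DataFrame built from a single 2D array are strided
# views and therefore not C-contiguous; from_pandas must still copy them
# correctly.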
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
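# set_index is exercised with plain column labels, external Index/Series
# objects, MultiIndexes, and mixed lists of these, across the drop, append,
# and inplace options.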
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
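# The reindex tests below cover passing labels positionally, via axis given
# as 0/1 or 'index'/'columns', and via the index=/columns= keywords, always
# comparing against pandas with copy=True.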
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(pdf_new_name, gdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only supports ignore_index in sort_index from v1.0, so it is
    # emulated below with reset_index(drop=True)
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
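# shift and diff are compared against pandas over positive, negative, and
# zero periods, for both populated and empty columns.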
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
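# DataFrame.round accepts a scalar, a Series, or a per-column dict of
# decimals; the test is repeated after injecting NaNs to cover null handling.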
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
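    # 1-D inputs are exercised as a Series; cuDF has no axis=1 for a Series,
    # so that combination is expected to raise NotImplementedError.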
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
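    # with no dtype given, an empty column defaults to float64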
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
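    # scalar `values` are not valid input to DataFrame.isin; both libraries
    # should raise equivalent errors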
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                # pytest.xfail takes only a reason string; gate on the pandas
                # version here instead of passing it as an extra argument.
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
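    # pass the same ddof to both libraries so var/std compute the same
    # estimator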
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
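    # convert with nullable=True so cuDF nulls round-trip as pandas pd.NA
    # (nullable dtypes) rather than being coerced to NaN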
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
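    # a categorical column's footprint is its categories plus its codes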
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
# Pandas uses NaN and typecasts to float64 if there's missing values on
# alignment, so need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
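    # pandas cannot consume device arrays, so copy any condition exposing
    # __cuda_array_interface__ back to host before using it on the pandas side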
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
    pdf = pd.DataFrame(pd_data, columns=cols, index=index)
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_columns_verbose(self):
t = pipeline.ColumnDropper(columns=['b'], verbose=True)
expected = self.df.loc[:, ['a']]
result = t.transform(self.df)
pd.testing.assert_frame_equal(result, expected)
    def test_drop_missing_columns(self):
t = pipeline.ColumnDropper(columns=['c'])
with self.assertWarns(Warning):
t.transform(self.df)
class TestColumnRename(TestCase):
def test_rename_columns(self):
t = pipeline.ColumnRename(lambda x: x.split('.')[-1])
df = pd.DataFrame(columns=['a.b.c', 'd.e.f'])
expected = pd.DataFrame(columns=['c', 'f'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestNaDropper(TestCase):
def test_drop_na(self):
t = pipeline.NaDropper()
df = pd.DataFrame([1, 0, pd.NA])
expected = pd.DataFrame([1, 0], dtype=object)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestClip(TestCase):
def test_clip(self):
t = pipeline.Clip(lower=0.5, upper=1.5)
        df = pd.DataFrame([[0.1, 0.4, 0.6, 0.8, 1.2, 1.5]])
        # assumes Clip clamps values element-wise to [lower, upper], like DataFrame.clip
        expected = pd.DataFrame([[0.5, 0.5, 0.6, 0.8, 1.2, 1.5]])
        result = t.fit_transform(df)
        pd.testing.assert_frame_equal(result, expected)
def is_sha1(maybe_sha):
    # A valid SHA-1 digest is exactly 40 hexadecimal characters.
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except ValueError:
        return False
    return True
def validate(date_text):
try:
datetime.datetime.strptime(date_text, '%d-%m-%Y:%S-%M-%H')
return True
except ValueError:
return False
from flask_cors import CORS
from flask import Flask, render_template, Response, request, jsonify
import pandas as pd
import os
import json
import shutil
import datetime
import base64
import binascii
import requests as r
LOGIN_FILE_NAME = "login.csv"
DB = "templates/images"
GLOBAL_LIST = "acts.csv"
IP = "192.168.127.12:80"
INSTANCE_IP = "192.168.3.11"
count_requests = 0
#IP = "127.0.0.1:5000"
app = Flask(__name__)
CORS(app)
@app.errorhandler(405)
def method_not_allowed(e):
global count_requests
count_requests += 1
return jsonify({'error': 405}), 405
@app.route("/")
def index():
return render_template('index.html')
@app.route("/api/v1/categories", methods = ["GET", "POST"])
def list_categories():
global count_requests
count_requests += 1
if not os.path.exists(DB):
os.makedirs(DB, exist_ok = True)
if request.method == 'GET':
categories = os.listdir(DB)
if not categories:
return Response('{}', status=204, mimetype='application/json')
response_data = {}
for category in categories:
response_data[category] = len(os.listdir(DB + "/" + category))
return jsonify(response_data)
elif request.method == "POST":
category = json.loads(request.data)[0]
if category in os.listdir(DB):
return Response('{}', status=400, mimetype='application/json')
os.makedirs(DB + "/" + category, exist_ok = True)
return Response('{}', status=201, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>", methods = ["DELETE"])
def delete_category(category = None):
global count_requests
count_requests += 1
if request.method == 'DELETE':
categories = os.listdir(DB)
if category in categories:
if GLOBAL_LIST in os.listdir():
                data = pd.read_csv(GLOBAL_LIST)
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 14 11:05:32 2021
Data Source: https://www.kaggle.com/nandalald/turkey-price
@author: Ashish
"""
import pandas as pd
import matplotlib.pyplot as plt
# load data
df1 = pd.read_csv('../../data/kaggle_turkey_foodprice_train.csv')
df2 = pd.read_csv('../../data/kaggle_turkey_foodprice_test.csv')
# merge the data frames
print(df1.shape, df2.shape)
df = pd.concat([df1,df2])
print(df.shape)
# data cleaning
## replace redundant values from column
# reference: https://stackoverflow.com/questions/33413249/how-to-remove-string-value-from-column-in-pandas-dataframe
df['ProductName'] = df.ProductName.str.replace('- Retail','')
print(df.head(5))
## filter values
df = df.query("Place != 'National Average'")
print(df.head(5))
print(df.dtypes)
# univariate plotting
# convert object dtypes to categorical
## reference: https://stackoverflow.com/questions/52404971/get-a-list-of-categories-of-categorical-variable-python-pandas
## TODO: convert all object dtypes to categorical
df['ProductName'] = pd.Categorical(df['ProductName'])
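# One possible way to handle the TODO above (a sketch; assumes every remaining object
# column in the merged df should become categorical, like ProductName):
for col in df.select_dtypes(include='object').columns:
    df[col] = pd.Categorical(df[col])
print(df.dtypes)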
import time, glob, os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def load_csv(exp_dir):
res_l = []
tim_l = []
for file in sorted(glob.glob(exp_dir + '/res/res*.csv')):
res = pd.read_csv(file, index_col=[0, 1], header=[0, 1])
res = res.groupby(by='call', axis=0).median()
res_l.append(res.reset_index())
for file in sorted(glob.glob(exp_dir + '/res/time*.csv')):
tim = pd.read_csv(file, index_col=[0], header=[0])
# tim = tim.groupby(by='call', axis=0).median()
tim_l.append(tim.reset_index())
# res = pd.read_csv(exp_dir+'/res/res*.csv', index_col=[0, 1], header=[0, 1])
# tim = pd.read_csv(exp_dir+'/res/time*.csv', index_col=[0], header=[0])
name = glob.glob(exp_dir + '/*.yaml')
    return pd.concat(res_l)
import datetime
import re
from itertools import islice
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.parser import parse as d
from utils_pandas import daterange
from utils_pandas import export
from utils_scraping import any_in
from utils_scraping import camelot_cache
from utils_scraping import get_next_number
from utils_scraping import get_next_numbers
from utils_scraping import logger
from utils_scraping import MAX_DAYS
from utils_scraping import NUM_OR_DASH
from utils_scraping import pairwise
from utils_scraping import parse_file
from utils_scraping import parse_numbers
from utils_scraping import seperate
from utils_scraping import split
from utils_scraping import strip
from utils_scraping import USE_CACHE_DATA
from utils_scraping import web_files
from utils_thai import file2date
from utils_thai import find_thai_date
from utils_thai import get_province
from utils_thai import join_provinces
from utils_thai import parse_gender
from utils_thai import today
def briefing_case_detail_lines(soup):
parts = soup.find_all('p')
parts = [c for c in [c.strip() for c in [c.get_text() for c in parts]] if c]
maintitle, parts = seperate(parts, lambda x: "วันที่" in x)
if not maintitle or "ผู้ป่วยรายใหม่ประเทศไทย" not in maintitle[0]:
return
# footer, parts = seperate(parts, lambda x: "กรมควบคุมโรค กระทรวงสาธารณสุข" in x)
table = list(split(parts, re.compile(r"^\w*[0-9]+\.").match))
if len(table) == 2:
# titles at the end
table, titles = table
table = [titles, table]
else:
table.pop(0)
# if only one table we can use camelot to get the table. will be slow but less problems
# ctable = camelot.read_pdf(file, pages="6", process_background=True)[0].df
for titles, cells in pairwise(table):
title = titles[0].strip("(ต่อ)").strip()
header, cells = seperate(cells, re.compile("ลักษณะผู้ติดเชื้อ").search)
# "อยู่ระหว่างสอบสวน (93 ราย)" on 2021-04-05 screws things up as its not a province
# have to use look behind
thai = r"[\u0E00-\u0E7Fa-zA-Z'. ]+[\u0E00-\u0E7Fa-zA-Z'.]"
not_prov = r"(?<!อยู่ระหว่างสอบสวน)(?<!ยู่ระหว่างสอบสวน)(?<!ระหว่างสอบสวน)"
provish = f"{thai}{not_prov}"
nl = " *\n* *"
nu = "(?:[0-9]+)"
is_pcell = re.compile(rf"({provish}(?:{nl}\({provish}\))?{nl}\( *{nu} *ราย *\))")
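        # Illustration (assumed example strings): is_pcell should capture cells such as
        # "เชียงใหม่ (12 ราย)" as a province/count pair, while the look-behind above stops
        # "อยู่ระหว่างสอบสวน (93 ราย)" (still under investigation) from being read as a province.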
lines = pairwise(islice(is_pcell.split("\n".join(cells)), 1, None)) # because can be split over <p>
yield title, lines
def briefing_case_detail(date, pages):
num_people = re.compile(r"([0-9]+) *ราย")
totals = dict() # groupname -> running total
all_cells = {}
rows = []
if date <= d("2021-02-26"): # missing 2nd page of first lot (1.1)
pages = []
for soup in pages:
for title, lines in briefing_case_detail_lines(soup):
if "ติดเชื้อจากต่างประเทศ" in title: # imported
continue
elif "การคัดกรองเชิงรุก" in title:
case_type = "Proactive"
elif "เดินทางมาจากต่างประเทศ" in title:
# case_type = "Quarantine"
continue # just care about province cases for now
# if re.search("(จากระบบเฝ้าระวัง|ติดเชื้อในประเทศ)", title):
else:
case_type = "Walkin"
all_cells.setdefault(title, []).append(lines)
# print(title,case_type)
for prov_num, line in lines:
# for prov in provs: # TODO: should really be 1. make split only split 1.
# TODO: sometimes cells/data separated by "-" 2021-01-03
prov, num = prov_num.strip().split("(", 1)
prov = get_province(prov)
num = int(num_people.search(num).group(1))
totals[title] = totals.get(title, 0) + num
_, rest = get_next_numbers(line, "(?:nผล|ผลพบ)") # "result"
asym, rest = get_next_number(
rest,
"(?s)^.*(?:ไม่มีอาการ|ไมมี่อาการ|ไม่มีอาการ)",
default=0,
remove=True
)
sym, rest = get_next_number(
rest,
"(?s)^.*(?<!(?:ไม่มี|ไมมี่|ไม่มี))(?:อาการ|อาการ)",
default=0,
remove=True
)
unknown, _ = get_next_number(
rest,
"อยู่ระหว่างสอบสวนโรค",
# "อยู่ระหว่างสอบสวน",
"อยู่ระหว่างสอบสวน",
"อยู่ระหว่างสอบสวน",
"ไม่ระบุ",
default=0)
# unknown2 = get_next_number(
# rest,
# "อยู่ระหว่างสอบสวน",
# "อยู่ระหว่างสอบสวน",
# default=0)
# if unknown2:
# unknown = unknown2
# TODO: if 1, can be by itself
if asym == 0 and sym == 0 and unknown == 0:
sym, asym, unknown = None, None, None
else:
assert asym + sym + unknown == num
rows.append((date, prov, case_type, num, asym, sym))
# checksum on title totals
for title, total in totals.items():
m = num_people.search(title)
if not m:
continue
if date in [d("2021-03-19")]: # 1.1 64!=56
continue
assert total == int(m.group(1)), f"group total={total} instead of: {title}\n{all_cells[title]}"
df = pd.DataFrame(
rows,
columns=["Date", "Province", "Case Type", "Cases", "Cases Asymptomatic", "Cases Symptomatic"]
).set_index(['Date', 'Province'])
return df
def briefing_case_types(date, pages, url):
rows = []
vac_rows = []
if date < d("2021-02-01"):
pages = []
for i, soup in enumerate(pages):
text = soup.get_text()
if "รายงานสถานการณ์" not in text:
continue
# cases = get_next_number(text, "ติดเชื้อจาก", before=True)
# walkins = get_next_number(text.split("รายผู้ที่เดิน")[0], "ในประเทศ", until="ราย")
# quarantine = get_next_number(text, "ต่างประเทศ", until="ราย", default=0)
if date == d("2021-05-17"):
numbers, rest = get_next_numbers(text.split("อาการหนัก")[1], "ในประเทศ")
local, cases, imported, prison, walkins, proactive, imported2, prison2, *_ = numbers
assert local == walkins + proactive
assert imported == imported2
assert prison == prison2
else:
numbers, rest = get_next_numbers(text, "รวม", until="รายผู้ที่เดิน")
cases, walkins, proactive, *quarantine = numbers
domestic = get_next_number(rest, "ในประเทศ", return_rest=False, until="ราย")
if domestic and date not in [d("2021-11-22"), d("2021-12-02"), d("2021-12-29")]:
assert domestic <= cases
assert domestic == walkins + proactive
quarantine = quarantine[0] if quarantine else 0
ports, _ = get_next_number(
text,
"ช่องเส้นทางธรรมชาติ",
"รายผู้ที่เดินทางมาจากต่างประเทศ",
before=True,
default=0
)
imported = ports + quarantine
prison, _ = get_next_number(text.split("รวม")[1], "ที่ต้องขัง", default=0, until="ราย")
cases2 = get_next_number(rest, r"\+", return_rest=False, until="ราย")
if cases2 is not None and cases2 != cases:
# Total cases moved to the bottom
# cases == domestic
cases = cases2
assert cases == domestic + imported + prison
if date not in [d("2021-11-01")]:
assert cases == walkins + proactive + imported + prison, f"{date}: briefing case types don't match"
# hospitalisations
hospital, field, severe, respirator, hospitalised = [np.nan] * 5
numbers, rest = get_next_numbers(text, "อาการหนัก")
if numbers:
severe, respirator, *_ = numbers
hospital, _ = get_next_number(text, "ใน รพ.")
field, _ = get_next_number(text, "รพ.สนาม")
num, _ = get_next_numbers(text, "ใน รพ.", before=True)
hospitalised = num[0]
assert hospital + field == hospitalised or date in [d("2021-09-04")]
elif "ผู้ป่วยรักษาอยู่" in text:
hospitalised, *_ = get_next_numbers(text, "ผู้ป่วยรักษาอยู่", return_rest=False, before=True)
if date > d("2021-03-31"): # don't seem to add up before this
hospital, *_ = get_next_numbers(text, "ใน รพ.", return_rest=False, until="ราย")
field, *_ = get_next_numbers(text, "รพ.สนาม", return_rest=False, until="ราย")
assert hospital + field == hospitalised
if date < d("2021-05-18"):
recovered, _ = get_next_number(text, "(เพ่ิมขึ้น|เพิ่มขึ้น)", until="ราย")
else:
# 2021-05-18 Using single infographic with 3rd wave numbers?
numbers, _ = get_next_numbers(text, "หายป่วยแล้ว", "หายป่วยแลว้")
cum_recovered_3rd, recovered, *_ = numbers
if cum_recovered_3rd < recovered:
recovered = cum_recovered_3rd
        assert not pd.isna(recovered)
"""Module with tests realted adding and managing metadata."""
import os
import json
import io
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# import sys
# add path to import app
# sys.path.append("./")
from app import db
from app.models import Dataset, BedFileMetadata
class TestAddMetadata(LoginTestCase, TempDirTestCase):
"""
Tests for addition of metadata to existing bedfile.
"""
def setUp(self):
super().setUp()
self.owned_dataset = Dataset(id=1, user_id=1)
self.unowned_dataset = Dataset(id=1, user_id=2)
# create dataset with test data length 6
test_filepath_len_6 = os.path.join(TempDirTestCase.TEMP_PATH, "test.bed")
test_data_len_6 = pd.DataFrame(
{"id": [0, 1, 2, 3, 4, 5], "start": [0] * 6, "end": [10] * 6}
)
test_data_len_6.to_csv(test_filepath_len_6, sep="\t", header=None)
self.dataset_len_6 = Dataset(id=1, user_id=1, file_path=test_filepath_len_6)
def test_access_denied_without_token(self):
"""Test whether post request results in 401 error
if no token is provided."""
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/", content_type="multipart/form-data"
)
self.assertEqual(response.status_code, 401)
def test_access_denied_not_owned_dataset(self):
"""Tests whether access is denied for posting to
not owned dataset."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# add dataset
db.session.add(self.unowned_dataset)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": "1",
"file": (io.BytesIO(b"abcdef"), "test.csv"),
"separator": ",",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 403)
def test_invalid_form_no_dataset_id(self):
"""Tests whether for with no dataset id causes
invalid error."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# add dataset
db.session.add(self.owned_dataset)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {"file": (io.BytesIO(b"abcdef"), "test.csv"), "separator": ","}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_separator(self):
"""Tests whether no separator in form causes
invalid error."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# add dataset
db.session.add(self.owned_dataset)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {"datasetID": 1, "file": (io.BytesIO(b"abcdef"), "test.csv")}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_textfile(self):
"""Tests whether wrong filetype causes
invalid error."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# add dataset
db.session.add(self.owned_dataset)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": 1,
"file": (io.BytesIO(b"abcdef"), "test.cool"),
"separator": ",",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 400)
def test_dataset_does_not_exist(self):
"""Tests whether dataset id that does not exist causes 404 error."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": 500,
"file": (io.BytesIO(b"abcdef"), "test.csv"),
"separator": ",",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 404)
def test_length_missmatch_dataset_metadata(self):
"""Tests whether length missmatch between
dataset and metadata is detected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create paylod_file
payload_filepath = os.path.join(TempDirTestCase.TEMP_PATH, "payload.bed")
payload_data = pd.DataFrame(
{"id": [0, 1, 2, 3], "start": [0] * 4, "end": [10] * 4}
)
payload_data.to_csv(payload_filepath, sep="\t")
# add dataset
db.session.add(self.dataset_len_6)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": 1,
"file": (open(payload_filepath, "rb"), "test.csv"),
"separator": "tab",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.get_json(), {"ValidationError": "Dataset length missmatch!"}
)
def test_valid_metadata_added_correctly(self):
"""Tests whether valid metadata dataset is added correctly."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
# create paylod_file
payload_filepath = os.path.join(TempDirTestCase.TEMP_PATH, "payload.bed")
payload_data = pd.DataFrame(
{"size": [0, 1, 2, 3, 4, 5], "start": [0] * 6, "end": [10] * 6}
)
payload_data.to_csv(payload_filepath, sep="\t", index=False)
# add dataset
db.session.add(self.dataset_len_6)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": 1,
"file": (open(payload_filepath, "rb"), "test.csv"),
"separator": "tab",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 200)
# check whether metadata database entry was created and links to correct dataset
self.assertEqual(1, len(BedFileMetadata.query.all()))
metadata = BedFileMetadata.query.first()
self.assertEqual(metadata.dataset_id, 1)
# check whether dataframe is ok
test_dataframe = pd.read_csv(metadata.file_path)
assert_frame_equal(test_dataframe, payload_data)
def test_valid_metadata_with_string_columns_added_correctly(self):
"""Tests whether valid metadata dataset is added correctly
even if it contains string columns"""
# authenticate
token = self.add_and_authenticate("test", "<PASSWORD>")
# create paylod_file
payload_filepath = os.path.join(TempDirTestCase.TEMP_PATH, "payload.bed")
payload_data = pd.DataFrame(
{
"size": [0, 1, 2, 3, 4, 5],
"start": [0] * 6,
"end": [10] * 6,
"string_column": ["asdf"] * 6,
}
)
payload_data.to_csv(payload_filepath, sep="\t", index=False)
# add dataset
db.session.add(self.dataset_len_6)
db.session.commit()
# create token_header
token_headers = self.get_token_header(token)
# add content-type
token_headers["Content-Type"] = "multipart/form-data"
# construct form data
data = {
"datasetID": 1,
"file": (open(payload_filepath, "rb"), "test.csv"),
"separator": "tab",
}
# dispatch post request
response = self.client.post(
"/api/bedFileMetadata/",
data=data,
headers=token_headers,
content_type="multipart/form-data",
)
self.assertEqual(response.status_code, 200)
# check whetehr numeric fields are returned
self.assertEqual(
response.get_json(),
{
"message": "success! Preprocessing triggered.",
"field_names": list(
sorted(payload_data.drop("string_column", axis="columns"))
),
"id": 1,
},
)
# check whether metadata database entry was created and links to correct dataset
self.assertEqual(1, len(BedFileMetadata.query.all()))
metadata = BedFileMetadata.query.first()
self.assertEqual(metadata.dataset_id, 1)
# check whether dataframe is ok
        test_dataframe = pd.read_csv(metadata.file_path)
import numpy as np
import scipy.stats as stats
import pandas as pd
import loter.pipeline as lt
import loter.initparam as initparam
import loter.initdata as initdata
import loter.opti as opti
import loter.estimatea as esta
import loter.estimateh as esth
import loter.graph as ests
##################################################################
# #
# Pipeline to optimize H (admixed haplotypes) and S (selection) #
# given A (ancestral haplotypes) simultaneously #
# #
# Initialization: #
# - init A with ancestral haplotypes #
# #
# Optimization: #
# - join optimisation of H and S #
# #
# Computational Complexity: #
# - n, the number of ancestral individuals #
# - m, the number of SNPs #
# complexity -> O(n^2 * m) #
# #
# Remarks: #
# In practice the pipeline is use as a phase corrector module #
##################################################################
def init_fix_a(data, param):
def init_a_fix(A, G):
return param["A_in"]
return initdata.init_data(data, init_a_fix, initdata.init_h_rand)
def opti_A_fix_join(data, param):
data, param = ests.optimize_SHknn(data, param)
data, param = esth.optimize_H_old(data, param)
return data, param
fixa_pip_A_join = lt.Pipeline(
initparam.param_initializers["classic_init"],
init_fix_a,
opti_A_fix_join,
)
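# Sketch of how this pipeline is invoked (mirrors learn_S_join below; G_pop and A_in are
# placeholders for the admixed genotype matrix and the stacked ancestral haplotypes):
#   l_res = fixa_pip_A_join(G_pop, nb_iter=1, nbclust=len(A_in), penalty=40,
#                           A_in=A_in, small_penalty=0, num_threads=10)
#   S, H = l_res[0]["S"], l_res[0]["H"]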
##################################################################
# #
# Pipeline to optimize S (selection) given A #
# (ancestral haplotypes) and H (admixed haplotypes) #
# #
# Initialization: #
# - init A with ancestral haplotypes #
# - init H with admixed haplotypes #
# #
# Optimization: #
# - optimisation of S #
# #
# Computational Complexity: #
# - n, the number of ancestral individuals #
# - m, the number of SNPs #
# complexity -> O(n * m) #
# #
##################################################################
def init_fix_ah(data, param):
def init_a_fix(A, G):
return param["A_in"]
def init_h_fix(H, G):
return param["H_in"]
return initdata.init_data(data, init_a_fix, init_h_fix)
def opti_AH_fix_knn(data, param):
data["A"] = data["A"].astype(np.uint8)
data["S"] = data["S"].astype(np.uint32)
data, param = ests.optimize_Sknn(data, param)
return data, param
fixa_pip_AH_knn = lt.Pipeline(
initparam.param_initializers["classic_init"],
init_fix_ah,
opti_AH_fix_knn
)
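# Sketch of how this pipeline is invoked (mirrors learn_Sknn below; G, H_in, A_in and
# weights are placeholders for genotypes, admixed haplotypes, ancestral haplotypes and
# per-SNP weights):
#   l_res = fixa_pip_AH_knn(G, nb_iter=1, nbclust=len(A_in), penalty=40,
#                           num_threads=10, weights=weights, A_in=A_in, H_in=H_in)
#   S = l_res[0]["S"]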
def learn_Sknn(pop, A_in, H_in, weights, penalty=40, num_threads=10):
G_pop = pop["G"]
H_pop = H_in
l_res_mix = fixa_pip_AH_knn(G_pop,
nb_iter=1, nbclust=len(A_in), penalty=penalty,
num_threads=num_threads,
weights=weights,
A_in=A_in,
H_in=H_pop
)
return l_res_mix[0]["S"]
def learn_S_join(pop, A_in, penalty=40, small_penalty=0, num_threads=10):
G_pop = pop["G"]
l_res_mix = fixa_pip_A_join(G_pop,
nb_iter=1, nbclust=len(A_in), penalty=penalty,
A_in=A_in,
small_penalty=small_penalty,
num_threads=num_threads
)
return l_res_mix[0]["S"], l_res_mix[0]["H"]
def get_items(dict_object):
"""
    Python 2 and 3 compatible iteration over a dictionary's items.
"""
for key in dict_object:
yield key, dict_object[key]
def clusters_to_list_pop(S, l_k):
"""
From a selection matrix S, compute the origin of each SNP.
input:
    S -- selection matrix giving, for each SNP, the index of the copied ancestral haplotype
    l_k -- list of ancestral population sizes (number of haplotypes per population)
"""
res = np.copy(S)
a = np.repeat(np.arange(len(l_k)), l_k)
b = np.arange(sum(l_k))
d = {k: v for v, k in zip(a, b)}
for k, v in get_items(d): res[S==k] = v
return res
def locanc_g_knn(l_h, g_adm, penalty=40, small_penalty=0, num_threads=10):
A_in = np.ascontiguousarray(np.vstack(l_h))
S_adm, H = learn_S_join({"G": g_adm}, A_in, penalty, small_penalty, num_threads)
result = clusters_to_list_pop(S_adm, [len(A) for A in l_h])
return result, S_adm, H
def locanc_h_knn(l_h, h_adm, penalty=40, num_threads=10):
A_in = np.ascontiguousarray(np.vstack(l_h))
g_adm = h_adm[::2] + h_adm[1::2]
n, m = h_adm.shape
weights = np.ones(m)
S_adm = learn_Sknn({"G": g_adm, "H": h_adm}, A_in, h_adm, weights, penalty, num_threads)
result = clusters_to_list_pop(S_adm, [len(A) for A in l_h])
return result, S_adm
def update_counts(counts, arr, k=2):
for p in range(k):
counts[p,:,:][arr == p] += 1
return counts
def mode(counts):
argmax = np.argmax(counts, axis=0)
return argmax, argmax.choose(counts)
def encode_haplo(H):
H1, H2 = H[::2], H[1::2]
return ((np.maximum(H1, H2) * (np.maximum(H1, H2) + 1)) / 2) + np.minimum(H1, H2)
def loter_multiple_pops(l_H, h_adm, lambd, num_threads=10, default=True):
odd = False
if h_adm.shape[0] % 2 != 0 & default:
odd = True
h_adm = np.vstack([h_adm, np.repeat(0, h_adm.shape[1])])
res_loter, _= locanc_h_knn([h.astype(np.uint8) for h in l_H],
h_adm.astype(np.uint8), lambd, num_threads)
if odd & default:
res_loter = res_loter[:res_loter.shape[0]-1]
return res_loter
def boostrap_loter_multiple_pops(l_H, h_adm, lambd, counts, nbrun=20, num_threads=10):
def shuffle(H):
n, m = H.shape
return H[np.random.randint(n, size=n), :]
if nbrun > 1:
for i in range(nbrun):
shuffled_H = [shuffle(h) for h in l_H]
counts = update_counts(counts,
loter_multiple_pops(shuffled_H,
h_adm,
lambd,
num_threads,
False),
len(l_H)
)
else:
counts = update_counts(counts,
loter_multiple_pops(l_H,
h_adm,
lambd,
num_threads,
False),
len(l_H)
)
return counts
def loter_local_ancestry(l_H, h_adm, range_lambda=np.arange(1.5, 5.5, 0.5),
rate_vote=0.5, nb_bagging=20, num_threads=10,
default=True):
odd = False
if h_adm.shape[0] % 2 != 0 & default:
odd = True
h_adm = np.vstack([h_adm, np.repeat(0, h_adm.shape[1])])
input_loter = (l_H, h_adm)
n, m = h_adm.shape
counts = np.zeros((len(l_H), n, m))
for l in range_lambda:
res_boostrap = boostrap_loter_multiple_pops(*input_loter, lambd=l,
counts=counts, nbrun=nb_bagging,
num_threads=num_threads)
res_tmp = mode(counts)
if default:
if odd:
res_loter = (res_tmp[0][:res_tmp[0].shape[0]-1],
res_tmp[1][:res_tmp[1].shape[1]-1])
else:
res_loter = res_tmp
return res_loter
else:
r = vote_and_impute(res_tmp, rate_vote)
return r, res_tmp
def diploid_sim(cluster_found, cluster_truth):
(n,m) = cluster_found.shape
return np.count_nonzero(cluster_found == cluster_truth) / float(n*m)
def find_lambda(s_in, threshold = 0.90, min_lambda = 1,
max_lambda = 500, num_threads=10):
n, m = s_in.shape
if max_lambda - min_lambda <= 1:
return locanc_g_knn([np.zeros((1,m)), np.ones((1,m))],
s_in, min_lambda, min_lambda, num_threads)
else:
mean = (max_lambda - min_lambda) / 2 + min_lambda
r_g, s_g, h_g = locanc_g_knn([np.zeros((1,m)), np.ones((1,m))],
s_in, mean, mean, num_threads)
sim = diploid_sim(r_g[::2] + r_g[1::2], s_in)
if sim > threshold:
return find_lambda(s_in, threshold, min_lambda = (max_lambda - min_lambda) / 2 + min_lambda,
max_lambda = max_lambda, num_threads=num_threads)
else:
return find_lambda(s_in, threshold, min_lambda = min_lambda,
max_lambda = max_lambda - ((max_lambda - min_lambda) / 2),
num_threads=num_threads)
def vote_and_impute(s, percent_threshold=0.5):
def select_val(s, percent_threshold):
max_s, min_s = np.max(s[1]), np.min(s[1])
threshold = percent_threshold*(max_s - min_s) + min_s
select = np.logical_and(s[1][::2] >= threshold,
s[1][1::2] >= threshold)
arr = encode_haplo(s[0])
arr[np.logical_not(select)] = 255
return arr
arr = select_val(s, percent_threshold)
n, m = arr.shape
res = np.copy(arr)
for i in range(n):
        serie = pd.Series(arr[i])
from pandas.testing import assert_frame_equal
import pandas as pd
import pytest
from speed_daemon import data
@pytest.fixture
def default_input():
return {
"download": 1000000,
"ping": 1000000,
"timestamp": "2020-10-12T03:09:18.231187Z",
"upload": 1000000,
}
@pytest.fixture
def default_expected_response():
return {
"_timestamp_string": "2020-10-12T03:09:18.231187Z",
"date": | pd.to_datetime("2020-10-12") | pandas.to_datetime |
# -*- coding: utf-8 -*-
# %%
# LightGBM install: use conda: https://anaconda.org/conda-forge/lightgbm
# StratifiedKFold: This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class.
# KFold: Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default).
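# A minimal illustration of the difference (toy example, not part of this script's data):
#   import numpy as np
#   from sklearn.model_selection import StratifiedKFold
#   y = np.array([0] * 90 + [1] * 10)
#   for _, val_idx in StratifiedKFold(n_splits=5).split(np.zeros((100, 1)), y):
#       print(y[val_idx].mean())  # ~0.1 in every fold: the overall class ratio is preserved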
# %%
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
plt.style.use('seaborn')
sns.set(font_scale=1)
#user defined
import augment as ag
os.chdir('/Users/hanbosun/Documents/GitHub/TrasactionPrediction/')
random_state = 42
# %%
df_train = pd.read_csv('input/train.csv')
df_test = pd.read_csv('input/test.csv')
ids = np.arange(df_train.shape[0])
np.random.seed(random_state)
np.random.shuffle(ids)
df_train = df_train.iloc[ids,:]
df_train_all = df_train
# %%
df_train = df_train_all.iloc[:2000,:]
# %%
lgb_params = {
"objective" : "binary",
"metric" : "auc",
"boosting": 'gbdt',
"max_depth" : -1,
"num_leaves" : 13,
"learning_rate" : 0.01,
"bagging_freq": 5,
"bagging_fraction" : 0.4,
"feature_fraction" : 0.05,
"min_data_in_leaf": 80,
"min_sum_hessian_in_leaf": 10,
"tree_learner": "serial",
"boost_from_average": "false",
#"lambda_l1" : 5,
#"lambda_l2" : 5,
"bagging_seed" : random_state,
"seed": random_state,
"verbosity": -1
}
# %%
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
oof = df_train[['ID_code', 'target']]
oof['predict'] = 0
predictions = df_test[['ID_code']]
val_aucs = []
feature_importance_df = pd.DataFrame()
features = [col for col in df_train.columns if col not in ['target', 'ID_code']]
X_test = df_test[features].values
# %%
for fold, (trn_idx, val_idx) in enumerate(skf.split(df_train, df_train['target'])):
X_train, y_train = df_train.iloc[trn_idx][features], df_train.iloc[trn_idx]['target']
X_valid, y_valid = df_train.iloc[val_idx][features], df_train.iloc[val_idx]['target']
N = 5
p_valid,yp = 0,0
for i in range(N):
#X_t, y_t = ag.augment(X_train.values, y_train.values)
#X_t, y_t = ag.augment_fast1(X_train.values, y_train.values)
X_t, y_t = ag.augment_fast2(X_train.values, y_train.values)
print(X_t.shape)
        X_t = pd.DataFrame(X_t)
from decimal import Decimal
import unittest, sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from unittest.mock import patch
from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv
class Test_Process_Raw_Data(unittest.TestCase):
#Test helper methods
def test_convert_datestring_array_to_datetime(self):
datestrings = ['2020-01-01 00:00:00', '2020-01-02 00:00:00', '2020-01-01 03:00:00']
expected_datetimes = [datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')]
self.assertEqual(expected_datetimes, convert_datestring_array_to_datetime(datestrings))
def test_create_expected_row(self):
input_row = [5,4,3,2,1]
expected_row = np.array([[1,2,3,4,1,2]])
actual_row = create_expected_row(input_row, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
#Test process_raw_data methods
def test_set_intervals(self):
intervals = [5, 5, 5]
set_intervals(intervals)
self.assertEqual(intervals, get_intervals())
def test_set_target_interval(self):
interval = timedelta(minutes=69)
set_target_interval(interval)
self.assertEqual(interval, get_target_interval())
def test_set_const_intervals(self):
expected_intervals = [3, 3, 3, 3, 3]
set_const_intervals(3, 5)
self.assertEqual(expected_intervals, get_intervals())
def test_set_max_input_minutes_missing(self):
minutes = 69
set_max_input_minutes_missing(minutes)
self.assertEqual(minutes, get_max_input_minutes_missing())
def test_set_market(self):
market = 'GBP/JPY'
set_market(market)
self.assertEqual(market, get_market())
def test_categorise_data(self):
self.assertEqual(1, apply_category_label_binary(1.2222, 1.2223))
self.assertEqual(0, apply_category_label_binary(1.2223, 1.2222))
@patch('forex_predictor.data_extraction.process_raw_data.pd')
def test_load_market_csv(self, mock_pd):
load_market_csv('EUR/GBP')
mock_pd.read_csv.assert_called_with('data/EUR_GBP.csv')
def test_get_dates(self):
intervals = [5, 5, 5]
set_intervals(intervals)
training_start = datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_start = datetime.strptime('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
test_start = datetime.strptime('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')
test_end = datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')
actual_training_dates, actual_validation_dates, actual_test_dates = get_dates(training_start, validation_start, test_start, test_end)
expected_training_dates = convert_datestring_array_to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:15:00', '2020-01-01 00:30:00', '2020-01-01 00:45:00'])
expected_validation_dates = convert_datestring_array_to_datetime(['2020-01-01 01:00:00', '2020-01-01 01:15:00', '2020-01-01 01:30:00', '2020-01-01 01:45:00'])
expected_test_dates = convert_datestring_array_to_datetime(['2020-01-01 02:00:00', '2020-01-01 02:15:00', '2020-01-01 02:30:00', '2020-01-01 02:45:00'])
self.assertEqual(expected_training_dates, actual_training_dates)
self.assertEqual(expected_validation_dates, actual_validation_dates)
self.assertEqual(expected_test_dates, actual_test_dates)
@patch('forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates')
def test_get_relevant_data(self, mock_method):
set_intervals([15,15,15,15])
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
target_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
get_relevant_data(df, target_date)
start_date = datetime.strptime('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')
mock_method.assert_called_with(start_date, end_date, df)
def test_get_dataframe_from_dates(self):
original_df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')
actual_df = get_dataframe_from_dates(start_date, end_date, original_df)
expected_df = original_df.iloc[74:79, :]
self.assertTrue(expected_df.equals(actual_df))
def test_find_start_date_index(self):
target_date = datetime.strptime('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')
df = pd.read_csv('tests/resources/dataframe_data.csv')
actual_index = find_start_date_index(df, target_date)
expected_index = 1994
self.assertEqual(expected_index, actual_index)
def test_process_input_data(self):
set_intervals([5, 5, 5])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
expected_input_data = pd.DataFrame(data=test_data)
actual_input_data = process_input_data(df)
self.assertTrue(expected_input_data.equals(actual_input_data))
def test_process_input_data_error(self):
set_intervals([5, 5, 5, 60])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
expected_error_message = 'Insufficient data to process for this number of intervals'
        with self.assertRaises(Exception) as context:
            process_input_data(df)
        self.assertEqual(expected_error_message, str(context.exception))
def test_create_row(self):
set_intervals([5,5,5])
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
input_values = pd.DataFrame(data=test_data)
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [1, 2])
actual_row = create_row(input_values, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_create_relevant_data_row(self):
set_intervals([5,5,5])
set_target_interval(timedelta(minutes=5))
        df = pd.read_csv('tests/resources/dataframe_data.csv')
# There are several ways to create a DataFrame.
# One way is to use a dictionary. For example:
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98] }
import pandas as pd
brics = pd.DataFrame(dict)
print(brics)
# Pandas has assigned a key for each country as the numerical values 0 through 4.
# If you would like to have different index values,
# say, the two letter country code, you can do that easily as well.
# Set the index for brics
brics.index = ["BR", "RU", "IN", "CH", "SA"]
# Print out brics with new index values
print(brics)
# Another way to create a DataFrame is by importing a csv file using Pandas.
countries = pd.read_csv('countries.csv')
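# As with the dictionary example, you can choose a column as the row index. The column
# name below is an assumption about countries.csv, so adjust it to the actual file:
# countries = pd.read_csv('countries.csv', index_col='country_code')
print(countries)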
"""Author: <NAME>
This contains the main Spomato class to be used to access the Spotify API and create new playlists based on the user's
defined criteria.
"""
import os
import pandas as pd
import spotipy
class Spomato():
"""Object used to access spotify API through spotipy and generate playlists.
This can take a combination user's saved tracks, playlists, and/or artist's songs to generate a playlist of a
specified length. This was conceived to use the Tomato Timer method as Spotify playlists.
This does require the user to provide a user API token from the spotify API. The API scopes used by this library are
playlist-read-private, playlist-modify-private, and user-library-read.
Parameters
----------
access_token : str
A valid Spotify Access token.
Attributes
----------
data : dictionary
Dictionary storing available data structures to create playlists.
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read
current_user_id : str
The string id of the user of the access token used to create the spotipy session.
"""
def __init__(self,
access_token=None):
"""Initialization function that sets access token and generates initial spotipy session.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
self.access_token = access_token
self.data = {}
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def update_token(self, access_token):
"""Updates the token and spotify session with the provided access_token. Generally used if your access token
has expired.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
# update the class access token and the spotipy session
self.access_token = access_token
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def _get_spotipy_session(self):
"""Internal Function to create a new spotify session.
Returns
-------
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
"""
return spotipy.Spotify(auth=self.access_token)
@staticmethod
def _parse_album(album_data, market='US'):
"""Parses the album data returned from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
album_data : dict
A dictionary of album data from Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the album data and parse the track data
series_list = []
album_tracks = album_data['tracks']['items']
for record in album_tracks:
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_user_playlist(data, market='US'):
"""Parses a user playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['tracks']['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_public_playlist(data, market='US'):
"""Parses public playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_saved_tracks(data, market='US'):
"""Parses a the saved songs data set of the user from the Spotify API and returns the song information as a
pandas DataFrame.
Parameters
----------
data : dictionary
Contains saved songs of the user from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the saved track data and parse the individual track data
series_list = []
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
def _cache_data(self, data_key, file_path):
"""Export the results of a dataset of song ids to local filesystem as a csv.
Parameters
----------
data_key : str
Key of the dataset to save
file_path : str
Full path of filename to save the file.
Returns
-------
None
"""
# use pandas dataframe write function to save file
self.data[data_key].to_csv(file_path, index=False)
def _load_cached_data(self, data_key, file_path):
"""Load a Saved Dataset into the Spomato data dictionary. Requires a csv with columns of 'song_id' and 'time'.
Parameters
----------
data_key : str
Key to associate the loaded dataset in the data dictionary.
file_path : str
Full path of filename to load the file.
Returns
-------
None
"""
data = pd.read_csv(file_path)
# ensure the required columns are in the dataset else raise error
if 'song_id' not in data.columns:
raise ValueError('Column song_id not found in loaded data file.')
if 'time' not in data.columns:
raise ValueError('Column song_id not found in loaded data file.')
# data looks correct, add dataset to data
self.data[data_key] = data
def get_file_data(self,
data_key='default',
file_path=None,
overwrite=False):
"""Loads a file of song data into Spomato to be used for generating new playlists.
Parameters
----------
data_key : str
Key to associate the dataset in the data dictionary.
file_path : str
Full path of filename if loading or saving dataset.
overwrite : bool
Boolean to determine if the dataset should be overwritten if it already exists.
Returns
-------
None
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if file_path is not None and not isinstance(file_path, str):
raise TypeError('Argument file_path must be of type string')
# check if the data key already exists to ensure data is not unexpectedly overwritten
if data_key in self.data.keys() and overwrite is False:
msg = (f'Dataset {data_key} already exists and reset argument is set to False. '
'Set reset to True to overwrite dataset.')
raise ValueError(msg)
# read the data from file if the file exists
if os.path.isfile(file_path):
self._load_cached_data(data_key=data_key,
file_path=file_path)
else:
raise ValueError('File path {f} does not exist.'.format(f=file_path))
def get_api_data(self,
data_key='default',
file_path=None,
source=None,
reset=False,
market='US'):
"""Generates a song dataset to load into Spomato to be used for generating new playlists.
Parameters
----------
data_key : str
Key to associate the dataset in the data dictionary.
file_path : str
            If not None, the dataset generated will also be saved to the specified file path.
source : dict
Contains all sources you want to use in generating the dataset. The dictionary is keyed by one of 3 source
types: savedtracks, playlist, or artist. For savedtracks the value can be None, as no further data is
required. For playlist or artist, the value should contain a list of all spotify ids of the appropriate
type. If not specified, it defaults to your saved tracks.
reset : bool
Boolean to determine if the dataset should be regenerated if it already exists.
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
None
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if file_path is not None and not isinstance(file_path, str):
raise TypeError('Argument file_path must be of type string')
if source is not None and not isinstance(source, dict):
raise TypeError('Argument source must be of type dict')
if not isinstance(reset, (bool, int)):
raise TypeError('Argument reset must be of type bool or int')
if not isinstance(market, str):
raise TypeError('Argument market must be of type string')
# check if the data key already exists to ensure data is not unexpectedly overwritten
if data_key in self.data.keys() and reset is False:
msg = (f'Dataset {data_key} already exists and reset argument is set to False. '
'Set reset to True to overwrite dataset.')
raise ValueError(msg)
# default the data source to the user's saved tracks if not specified
if source is None:
source = {'savedtracks': None}
# generate the dataset and save it into the Spomato object
self.data[data_key] = self._get_new_data(source=source,
market=market)
# Cache the data if the file_path is specified
if file_path:
self._cache_data(data_key=data_key,
file_path=file_path)
def _get_playlist_dataframe(self,
source_list,
market):
"""Short summary.
Parameters
----------
source_list : list
A list of playlist ids to source songs from
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of songs with song id and time.
"""
# get the list of playlists and filter out datasets included in the source list
playlist_df = self.get_playlists()
playlist_list = []
for pl_id in source_list:
if not playlist_df[playlist_df.playlist_id == pl_id].empty:
pl_json = self.spotipy_session.user_playlist(self.current_user_id, pl_id)
pl_df = self._parse_user_playlist(pl_json, market)
playlist_list.append(pl_df)
else:
pl_json = self.spotipy_session.playlist_tracks(pl_id)
pl_df = self._parse_public_playlist(pl_json, market)
playlist_list.append(pl_df)
if len(playlist_list) == 0:
raise ValueError('No valid playlists.')
# concatinate the dataframes of all the playlist and remove any duplicates
data = pd.concat(playlist_list)
data.drop_duplicates(inplace=True)
return data
def _get_artist_dataframe(self,
source_list,
market):
"""Short summary.
Parameters
----------
source_list : list
            A list of artist ids to source songs from
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of songs with song id and time.
"""
# iterate over each artist, get the data from the Spotify API, and parse the song data
artist_list = []
for artist in source_list:
artist_songs = self._get_artist_data(artist, market)
artist_list.append(artist_songs)
# concatinate the dataframes of all the playlist and remove any duplicates
data = pd.concat(artist_list)
data.drop_duplicates(inplace=True)
return data
def _get_new_data(self,
source=None,
market='US'):
"""Creates a new dataset from the specified source list and returns a pandas DataFrame of song ids and times.
Parameters
----------
source : dict
Contains all sources you want to use in generating the dataset. The dictionary is keyed by one of 3 source
types: savedtracks, playlist, or artist. For savedtracks the value can be None, as no further data is
required. For playlist or artist, the value should contain a list of all spotify ids of the appropriate
type.
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
# if the source is not specified, default to the saved tracks of the current user.
if source is None:
source = {'savedtracks': None}
elif not isinstance(source, dict):
raise ValueError('Argument source must be of type dict or None.')
elif len(source.keys()) == 0:
raise ValueError('Argument source must contain at least 1 valid key from: savedtracks, artist, playlist')
else:
for key in source.keys():
if key not in ['savedtracks', 'artist', 'playlist']:
raise ValueError(f'{key} is not a valid data source type.')
# iterate over the source types in the source dictionary and parse out the data
data_list = []
for sourcetype in source.keys():
if sourcetype == 'savedtracks':
# print 'SAVEDTRACKS'
data = self._get_saved_tracks(market)
data_list.append(data)
elif sourcetype == 'playlist':
playlist_data = self._get_playlist_dataframe(source_list=source['playlist'],
market=market)
data_list.append(playlist_data)
elif sourcetype == 'artist':
artist_data = self._get_artist_dataframe(source_list=source['artist'],
market=market)
data_list.append(artist_data)
# concatinate the dataframes of all the source types and remove any duplicates
data = pd.concat(data_list)
data.drop_duplicates(inplace=True)
return data
def pick_tracks(self,
data_key,
time=25,
extra=5,
time_limit=None):
"""Using a specified dataset, this generates a subset of the dataframe of songs that fit the time constraints.
Parameters
----------
data_key : str
Name of the dataset to use stored in the data object in Spomato
time : int
The length in minutes to make the playlist
extra : type
The amount of buffer time to add on to the end of the playlist.
time_limit : type
The maximum song length in minutes to include in the playlist.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if not isinstance(time, (int, float)):
raise TypeError('Argument time must be of type int or float')
if not isinstance(extra, (int, float)):
raise TypeError('Argument extra must be of type int or float')
if time_limit is not None and not isinstance(time_limit, (int, float)):
raise TypeError('Argument time_limit must be of type int or float')
track_df = self.data[data_key]
# the time in our dataframe is specified in seconds, we need to convert the times
time *= 60
extra *= 60
# if time limit is not specified, default it to one third of the parameter time
if time_limit is None:
time_limit = time/3.0
else:
time_limit *= 60
# filter out any records that are longer than the time limit
track_df = track_df[track_df['time'] <= time_limit]
# iterate adding songs to the selected track until the time is reached
time_used = 0
track_list = []
done = False
while not done:
# filter down to tracks that fit in the remaining time
track_df = track_df[track_df.time <= (time + extra - time_used)]
# if the total time is greater than the specified time, mark the iteration done.
if time_used > time:
done = True
# if the filtered song list is empty, there are no songs left, so mark iteration done
elif track_df.empty:
done = True
# otherwise, take a random track from the dataframe, add to the track list, and remove it from being
# selected again
else:
track = track_df.sample().iloc[0]
track_df = track_df[track_df.song_id != track.song_id]
track_list.append(track)
time_used += track.time
# concatenate all of the selected tracks into a dataframe.
picked_track_df = pd.concat(track_list, axis=1).T
return picked_track_df
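# Hedged usage sketch: picking roughly 25 minutes of songs (plus up to 5 minutes of buffer)
# from a dataset previously stored under the key 'mix' (the key name is an assumption).
#
#   tracks = sp.pick_tracks(data_key='mix', time=25, extra=5)
#
# With time_limit unset, any single track longer than time/3 minutes is excluded up front.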
def _get_saved_tracks(self, market):
"""Access the spotify API to get the saved tracks for a user and returns a dataframe of song ids and times.
Parameters
----------
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
# iterate over a user's saved tracks until all have been accessed and parsed
end = False
i = 0
track_df_list = []
while not end:
data = self.spotipy_session.current_user_saved_tracks(limit=50, offset=i*50)['items']
if len(data) > 0:
track_df = self._parse_saved_tracks(data, market)
track_df_list.append(track_df)
i += 1
else:
end = True
# concatenate the created dataframes and remove any duplicates
track_df = pd.concat(track_df_list).reset_index(drop=True)
track_df.drop_duplicates(inplace=True)
return track_df
def _get_artist_data(self, artist_id, market):
"""Access the spotify API to get an artist's tracks and returns a dataframe of song ids and times.
Parameters
----------
artist_id : str
The Spotify id of the artist whose tracks should be retrieved.
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of song ids and times generated from the sources.
"""
# get all of the artist's albums ids and parse out the json for each
artist_albums = self.spotipy_session.artist_albums(artist_id)
album_ids = [x['id'] for x in artist_albums['items']]
album_jsons = self.spotipy_session.albums(album_ids)['albums']
# iterate over each album and parse out the songs
songdf = []
for album in album_jsons:
if market in album['available_markets']:
songs = self._parse_album(album, market)
songdf.append(songs)
# concatenate the results from each album into a single dataframe
data = pd.concat(songdf)
return data
def get_playlists(self):
"""Access the spotify API to get the playlists for a user and returns a dataframe of names and ids.
Returns
-------
pandas.DataFrame
A dataframe consisting of the current user's playlist names and playlist ids.
"""
# get the user's playlist and parse the playlist name and id
pl_json = self.spotipy_session.current_user_playlists()
series_index = ['playlist_name', 'playlist_id']
playlist_list = [pd.Series([pl['name'], pl['id']], index=series_index) for pl in pl_json['items']]
# concatenate the results into a pandas dataframe
playlist_df = pd.concat(playlist_list, axis=1).T
return playlist_df
def make_playlist(self,
playlist_name,
song_df,
overwrite=False):
"""Create or overwrite a spotify playlist from the dataframe of songs.
Parameters
----------
playlist_name : str
The name of the playlist you want to create/overwrite
song_df : pandas.DataFrame
Dataframe of songs to be in the playlist.
overwrite : bool
Boolean to determine whether to overwrite the playlist if it already exists.
Returns
-------
None
"""
if not isinstance(playlist_name, str):
raise TypeError('Argument playlist_name must be of type string')
if not isinstance(song_df, pd.DataFrame):
raise TypeError('Argument song_df must be of type pandas.DataFrame')
# get the user's playlists
playlist_df = self.get_playlists()
# if the playlist name already exists and is not set to be overwritten, raise an error
if playlist_name in playlist_df.playlist_name.tolist() and not overwrite:
raise ValueError('Playlist {p} already exists, set overwrite to True.'.format(p=playlist_name))
# if the playlist already exists, replace the playlist with the new track list
if playlist_name in playlist_df.playlist_name.tolist():
playlist_id = playlist_df[playlist_df.playlist_name == playlist_name].iloc[0].playlist_id
self.spotipy_session.user_playlist_replace_tracks(user=self.current_user_id,
playlist_id=playlist_id,
tracks=song_df.song_id.tolist()
)
# if the playlist doesn't exist, create a new playlist with the track list
else:
self.spotipy_session.user_playlist_create(self.current_user_id,
playlist_name,
public=False)
playlist_df = self.get_playlists()
playlist_id = playlist_df[playlist_df.playlist_name == playlist_name].iloc[0].playlist_id
self.spotipy_session.user_playlist_add_tracks(self.current_user_id,
playlist_id,
tracks=song_df.song_id.tolist()
)
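# Hedged usage sketch: writing a picked track list back to Spotify. `tracks` is assumed to be
# the dataframe returned by pick_tracks, which carries the required `song_id` column.
#
#   sp.make_playlist(playlist_name='Pomodoro 25', song_df=tracks, overwrite=True)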
def pick_track_and_make_playlist(self,
data_key,
playlist_name,
time=25,
extra=5,
time_limit=None,
overwrite=False):
"""Picks the tracks from a created dataset and creates/overwrites a playlist with the data.
Parameters
----------
data_key : str
Name of the dataset stored in Spomato's data attribute to use.
playlist_name : str
The name of the playlist you want to create/overwrite
time : int
The length in minutes to make the playlist
extra : int
The amount of buffer time to add on to the end of the playlist.
time_limit : int
The maximum song length in minutes to include in the playlist.
overwrite : bool
Boolean to determine whether to overwrite the playlist if it already exists.
Returns
-------
None
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if not isinstance(playlist_name, str):
raise TypeError('Argument playlist_name must be of type string')
if not isinstance(time, (int, float)):
raise TypeError('Argument time must be of type int or float')
if not isinstance(extra, (int, float)):
raise TypeError('Argument extra must be of type int or float')
if time_limit is not None and not isinstance(time_limit, (int, float)):
raise TypeError('Argument time_limit must be of type int or float')
# generate the list of songs for the playlist
song_df = self.pick_tracks(data_key=data_key,
time=time,
extra=extra,
time_limit=time_limit)
# create the playlist with the song dataframe
self.make_playlist(playlist_name=playlist_name,
song_df=song_df,
overwrite=overwrite)
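# Hedged end-to-end sketch combining the picking and playlist-creation steps in one call:
#
#   sp.pick_track_and_make_playlist(data_key='mix', playlist_name='Pomodoro 25',
#                                   time=25, extra=5, overwrite=True)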
def artist_id_search(self,
artist,
limit=10,
offset=0):
"""Search the Spotify API for an artist and return the search results of matches and ids. This can be useful if
you don't know an artist's id to generate a playlist.
Parameters
----------
artist : str
Name of the artist to search. More complex searches can be run using the query guidelines in Spotipy.
limit : int
The number of records to return from the search.
offset : int
The number of records to skip in the search results.
Returns
-------
pandas.DataFrame
A dataframe of artist names and ids from the search result.
"""
if not isinstance(limit, int):
raise TypeError('Argument limit must be of type int')
if not isinstance(offset, int):
raise TypeError('Argument offset must be of type int')
artist_results = self.spotipy_session.search(artist,
type='artist',
limit=limit,
offset=offset)
artist_items = artist_results['artists']['items']
if len(artist_items) > 0:
index = ['artist', 'id']
artist_df = pd.concat([pd.Series([x['name'], x['id']], index=index) for x in artist_items], axis=1).T
return artist_df
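# Hedged usage sketch: looking up an artist id to feed back into a source dict. The dataset
# method name `get_new_data` is a hypothetical placeholder.
#
#   matches = sp.artist_id_search(artist='Radiohead', limit=5)
#   artist_id = matches.iloc[0]['id']
#   data = sp.get_new_data(source={'artist': [artist_id]}, market='US')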
# coding: utf-8
import numpy as np
import pandas as pd
import umap
from bokeh.resources import INLINE, CDN
from bokeh.embed import file_html
#https://umap-learn.readthedocs.io/en/latest/basic_usage.html
def embeddable_image(image_path):
from io import BytesIO
from PIL import Image
import base64
image = Image.open(str(image_path)).resize((64, 64), Image.BICUBIC)
buffer = BytesIO()
image.save(buffer, format='png')
for_encoding = buffer.getvalue()
return 'data:image/png;base64,' + base64.b64encode(for_encoding).decode()
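# Hedged note: the data-URI returned above is meant to be stored in a ColumnDataSource column
# and referenced from a Bokeh HoverTool tooltip, e.g. (assuming the column is named 'image'):
#
#   tooltips = '<img src="@image" height="64" width="64"/>'
#
# so hovering over a UMAP point shows the corresponding cell thumbnail.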
def umap_bokeh(bn_feat, #= pd.read_csv('../results/prod_test_feat.csv', index_col=0),
pred_df, #= pd.read_csv('../results/predicted_malaria.csv', index_col = 0),
image_folder = '../flask/uploads'
):
#feat_file = '../results/prod_test_feat.csv'
#prediction_csv = '../results/predictions_malaria.csv'
#image_folder = '../flask/uploads'
#bn_feat = pd.read_csv('../data/cv_feat.csv', index_col=0)
#pred_df = pd.read_csv('../results/predictions_prod_test.csv', index_col = 0)
#image_folder = '../flask/uploads'
#load
'''Plot a Bokeh scatter of the UMAP dimensionality reduction, colored by
label, with thumbnail image overlays for the webapp results.'''
if bn_feat.shape[0] < 3:
print('Please select at least 3 cells to classify')
return
# bn_feat = pd.read_csv(feat_file, index_col = 0)
#bn_feat = bn_feat.sample(frac=0.01)
reducer = umap.UMAP(random_state=42)
## -- UMAP (might want to make into another function)
# Fit the dimensionality reduction
features = bn_feat.drop(columns=['label','fn'])
reducer.fit(features)
embedding = reducer.transform(features)
# Verify that the result of calling transform is
# identical to accessing the embedding_ attribute
assert np.all(embedding == reducer.embedding_)
# mask = df.label == 'Parasitized'
bn_feat['path'] = str(image_folder) +'/' + bn_feat['fn']
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import HoverTool, ColumnDataSource, CategoricalColorMapper
from bokeh.palettes import Spectral10
from bokeh.embed import components
output_notebook()
df_images = pd.DataFrame(embedding, columns=('x', 'y'), index=bn_feat.index)
import json
from operator import itemgetter
from pathlib import Path
import geopandas as gpd
import pandas as pd
from shapely import wkt
from dtcv.pt_lib import *
p = Path('/Users/eric/Google Sync/sandiegodata.org/Projects/Downtown Partnership Homeless'
'/Annotations/complete/gcp')
intersections_file = '../data/gcp_intersections_2230.csv'
cols = 'x y width height'.split()
def load_gcp_files(p):
region_ig = itemgetter(*cols)
rows = []
for fn in p.glob('*.json'):
if '-gcp-' not in fn.stem:
continue
with fn.open() as f:
d = json.load(f)['_via_img_metadata']
for k, v in d.items():
if k == 'example':
continue
image_url = v['filename']
for region in v['regions']:
try:
row = [fn, image_url] + \
list(region_ig(region['shape_attributes'])) + \
[region['region_attributes']['Intersection']]
rows.append(row)
except KeyError:
print("Error in {} wrong keys in shape attributes: {}" \
.format(fn, region['shape_attributes']))
return pd.DataFrame(rows, columns=['source', 'image'] + cols + ['intersection'])
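# Hedged usage sketch: loading every GCP annotation file under `p` into one dataframe and
# joining it against the intersections file. The join key name is an assumption.
#
#   gcp_df = load_gcp_files(p)
#   inter_df = pd.read_csv(intersections_file)
#   merged = gcp_df.merge(inter_df, on='intersection', how='left')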
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
We don't use drop_table because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query all at once
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
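# Helper for the index-saving tests: reflect the table with SQLAlchemy and return the column
# names of each index defined on it.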
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# the result index gains a name from the index_col round-trip, while the expected index is unnamed
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from the table -> the column stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so DateCol should not come back parsed as datetime64
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schema's not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schema's
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
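        # Callables passed via to_sql(method=...) receive (table, conn, keys, data_iter);
        # this one streams the batch through PostgreSQL COPY using psycopg2's copy_expert.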
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
        cols = {
            "Bool": Series([True, None]),
            "Date": Series([datetime(2012, 5, 1), None]),
            "Int": Series([1, None], dtype="object"),
            "Float": Series([1.1, None]),  # api: pandas.Series
from flask import Flask, render_template, request, flash
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import base64
from scipy.signal import medfilt
from scipy.integrate import trapz
import xml.etree.ElementTree as et
from datetime import date
today = date.today()
import webbrowser
import warnings
warnings.filterwarnings('ignore')
sns.set(style="darkgrid")
ALLOWED_EXTENSIONS = {'xml'}
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\rec]/'
@app.route('/', methods=["POST"])
def process_data():
if request.method == "POST":
output_name = request.form.get("outName")
pacedFlatValue = request.form.get("pacedFlatValue")
unpacedFlatValue = request.form.get("unpacedFlatValue")
pacedThresholdPercentCutoff = request.form.get("pacedThresholdPercentCutoff")
unpacedThresholdPercentCutoff = request.form.get("unpacedThresholdPercentCutoff")
pacedZScoreCutoff = request.form.get("pacedZScoreCutoff")
hyperbolicCosFrequencyValue = request.form.get("hyperbolicCosFrequencyValue")
if output_name:
output_name = output_name
else:
output_name = "Data"
if pacedFlatValue:
pacedFlatValue = int(pacedFlatValue)
else:
pacedFlatValue = 50
if unpacedFlatValue:
unpacedFlatValue = int(unpacedFlatValue)
else:
unpacedFlatValue = 60
if pacedThresholdPercentCutoff:
pacedThresholdPercentCutoff = float(pacedThresholdPercentCutoff)
else:
pacedThresholdPercentCutoff = 0.98
if unpacedThresholdPercentCutoff:
unpacedThresholdPercentCutoff = float(unpacedThresholdPercentCutoff)
else:
unpacedThresholdPercentCutoff = 0.992
if pacedZScoreCutoff:
pacedZScoreCutoff = int(pacedZScoreCutoff)
else:
pacedZScoreCutoff = 150
if hyperbolicCosFrequencyValue:
hyperbolicCosFrequencyValue = int(hyperbolicCosFrequencyValue)
else:
hyperbolicCosFrequencyValue = 600
# fileName = request.files['inFile']
fileNames = request.files.getlist("inFile[]")
for fileName in fileNames:
            if not (fileName and allowed_file(fileName.filename)):
                flash('No file provided or incorrect file type. Please upload one or more .xml files.')
                return render_template('index.html')
roots = []
for fileName in fileNames:
roots.append([et.parse(fileName).getroot()])
# roots = [et.parse(fileName).getroot()]
#--------------------------------------------------------------------------------------------------------------
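        # Robust outlier score: 0.6745 * (x - median) / MAD. Used downstream on the first
        # difference of each lead, where large values flag abrupt jumps such as pacing spikes.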
def modified_z_score(intensity):
median_int = np.median(intensity)
mad_int = np.median([np.abs(intensity - median_int)])
if mad_int == 0:
mad_int = 1
modified_z_scores = 0.6745 * (intensity - median_int) / mad_int
return modified_z_scores
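        # df_fixer: adaptively raise a spike threshold on the modified z-score of the signal's
        # first difference (stricter settings for paced traces), then blank the flagged samples
        # with None so the spike regions can be bridged afterwards.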
def df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > pacedZScoreCutoff:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), pacedThresholdPercentCutoff)+pacedFlatValue:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), pacedThresholdPercentCutoff)+pacedFlatValue:
x += 5
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= pacedZScoreCutoff:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), unpacedThresholdPercentCutoff)+unpacedFlatValue:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), unpacedThresholdPercentCutoff)+unpacedFlatValue:
x += 5
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
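        # half_df_fixer: same spike-removal logic as df_fixer, but the Qonset/Qoffset windows
        # are taken from half_data (the 500 samples/sec recordings).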
def half_df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > pacedZScoreCutoff:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), pacedThresholdPercentCutoff)+pacedFlatValue:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), pacedThresholdPercentCutoff)+pacedFlatValue:
x += 5
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= pacedZScoreCutoff:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), unpacedThresholdPercentCutoff)+unpacedFlatValue:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), unpacedThresholdPercentCutoff)+unpacedFlatValue:
x += 5
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
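        # hanging_line: fit a hyperbolic-cosine (catenary-like) curve between two points;
        # presumably used to interpolate across the samples blanked by the fixers above.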
def hanging_line(point1, point2):
a = (point2[1] - point1[1])/(np.cosh(point2[0] % hyperbolicCosFrequencyValue) - np.cosh(point1[0] % hyperbolicCosFrequencyValue))
b = point1[1] - a*np.cosh(point1[0] % hyperbolicCosFrequencyValue)
x = np.linspace(point1[0], point2[0], (point2[0] - point1[0])+1)
y = a*np.cosh(x % hyperbolicCosFrequencyValue) + b
return (x,y)
Tags = {'tags':[]}
tags = {'tags':[]}
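        # Parse each Philips ECG XML export that carries two waveform blocks: recordings at
        # 1000 samples/sec are collected into `tags`, any other sampling rate into `Tags`
        # (half-rate data). Global measurements, patient metadata and the base64 waveform
        # string are kept per lead.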
for root in roots:
root = root[0]
if len(root.find('{http://www3.medical.philips.com}waveforms').getchildren()) == 2:
if int(root.find('{http://www3.medical.philips.com}waveforms')[1].attrib['samplespersec']) == 1000:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
tag = {}
tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = 0
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Failed' or root[6][1][0][14].text == 'Failed' or (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid'):
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = int(elem[0].text)
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
tag['Qonset'] = np.nan
tag['Qrsdur'] = np.nan
tag['Qoffset'] = np.nan
tag['Tonset'] = np.nan
tag['Qtint'] = np.nan
tag['Toffset'] = np.nan
tag['Tdur'] = np.nan
else:
tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
tag['Qrsdur'] = int(root[6][0][29].text)
tag['Qoffset'] = tag['Qonset'] + tag['Qrsdur']
tag['Tonset'] = int(elem[4].text)
tag['Qtint'] = int(root[6][1][0][18].text)
tag['Toffset'] = tag['Qonset'] + tag['Qtint']
tag['Tdur'] = tag['Qoffset'] - tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None and root[6][0][31].text != 'Failed': tag['QTC'] = int(root[6][0][31].text)
tag['Target'] = []
for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
tag['HeartRate'] = np.nan
tag['RRint'] = np.nan
tag['AtrialRate'] = np.nan
tag['QRSFrontAxis'] = np.nan
tag['QTC'] = np.nan
tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
tag['Date'] = time['date']
tag['Time'] = time['time']
tag['Sex'] = root[5][0][6].text
tag['ID'] = root[5][0][0].text
tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
tag['Waveform'] = elem[6].text
tags['tags'].append(tag)
else:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
Tag = {}
Tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = float(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = 0
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == None or root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = int(elem[0].text)
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][18].text == None or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
Tag['Qonset'] = np.nan
Tag['Qrsdur'] = np.nan
Tag['Qoffset'] = np.nan
Tag['Tonset'] = np.nan
Tag['Qtint'] = np.nan
Tag['Toffset'] = np.nan
Tag['Tdur'] = np.nan
else:
Tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
Tag['Qrsdur'] = int(root[6][0][29].text)
Tag['Qoffset'] = Tag['Qonset'] + Tag['Qrsdur']
Tag['Tonset'] = int(elem[4].text)
Tag['Qtint'] = int(root[6][1][0][18].text)
Tag['Toffset'] = Tag['Qonset'] + Tag['Qtint']
Tag['Tdur'] = Tag['Qoffset'] - Tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): Tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: Tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: Tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': Tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None: Tag['QTC'] = int(root[6][0][31].text)
Tag['Target'] = []
for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
Tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
Tag['HeartRate'] = np.nan
Tag['RRint'] = np.nan
Tag['AtrialRate'] = np.nan
Tag['QRSFrontAxis'] = np.nan
Tag['QTC'] = np.nan
Tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
Tag['Date'] = time['date']
Tag['Time'] = time['time']
Tag['Sex'] = root[5][0][6].text
Tag['ID'] = root[5][0][0].text
Tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if len(root[5][0].find('{http://www3.medical.philips.com}age')) > 0:
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
Tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
Tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
Tag['Waveform'] = elem[6].text
Tags['tags'].append(Tag)
half_data = pd.DataFrame(Tags['tags'])
data = pd.DataFrame(tags['tags'])
del roots
del root
del elem
count1000 = int(len(data)/12)
count500 = int(len(half_data)/12)
count = count1000 + count500
if len(data) > 0:
array = np.unique(data[data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_data = data.loc[data['ID'].isin(array) & data['Date'].isin(array) & data['Time'].isin(array)]
data.drop(missing_data.index, axis=0,inplace=True)
missing_data = missing_data.reset_index(drop=True)
del tag
del tags
data = data.reset_index(drop=True)
for n in range(count1000):
data.Tonset[n*12:(n+1)*12] = np.repeat(int(data.Tonset[n*12:(n+1)*12].sum()/12), 12)
data.Pdur[n*12:(n+1)*12] = np.repeat(int(data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(data.Waveform)):
t = base64.b64decode(data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
df = pd.DataFrame(a)
df.insert(0, 'Lead', data['Lead'])
blank = []
for n in range(count1000):
blank.append(pd.pivot_table(df[(n*12):(n+1)*12], columns=df.Lead))
test = pd.concat(blank)
new = []
array = []
for n in range(13):
for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
if num > 128:
new.append(index - (256 * (256 - num)))
elif num < 128:
new.append(index + (256 * num))
elif num == 0:
new.append(index)
else:
new.append(index)
new = []
array.append(new)
array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
df = pd.DataFrame(array)
        df = pd.pivot_table(df, columns=test.columns)  # api: pandas.pivot_table
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from vivarium.framework.randomness import get_hash
from vivarium_csu_sanofi_multiple_myeloma.constants import models
from vivarium_csu_sanofi_multiple_myeloma.constants.metadata import SCENARIOS, HAZARD_RATE_SOURCES
from vivarium_csu_sanofi_multiple_myeloma.constants.data_values import (OS_HR, PFS_HR, PROBABILITY_RETREAT, RCT_OS_HR,
RCT_PFS_HR, REGISTRY_ENROLL_PROBABILITY)
from vivarium_csu_sanofi_multiple_myeloma.utilities import LogNormalHazardRate
if TYPE_CHECKING:
from vivarium.framework.engine import Builder
from vivarium.framework.event import Event
from vivarium.framework.population import SimulantData
TREATMENT_LINES = pd.Index(
list(models.MULTIPLE_MYELOMA_WITH_CONDITION_STATES),
name=models.MULTIPLE_MYELOMA_MODEL_NAME
)
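# Treatment coverage by line and calendar year for isatuximab / daratumumab; the intermediate
# years ramp daratumumab coverage up from zero in 2016 toward the 2021 values.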
def make_treatment_coverage(year, scenario):
scalar_2019 = (2019-2016)/(2021-2016)
scalar_2020 = (2020-2016)/(2021-2016)
coverages = {
(2016, SCENARIOS.baseline): (
[0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000],
),
(2019, SCENARIOS.baseline): (
[0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.198 * scalar_2019, 0.323 * scalar_2019, 0.365 * scalar_2019, 0.3011 * scalar_2019],
),
(2020, SCENARIOS.baseline): (
[0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.198 * scalar_2020, 0.323 * scalar_2020, 0.365 * scalar_2020, 0.3011 * scalar_2020]
),
(2021, SCENARIOS.baseline): (
[0.000, 0.005, 0.010, 0.033, 0.033],
[0.029, 0.198, 0.323, 0.365, 0.3011],
),
(2025, SCENARIOS.baseline): (
[0.000, 0.100, 0.090, 0.070, 0.070],
[0.34, 0.34, 0.34, 0.34, 0.34],
),
(2025, SCENARIOS.alternative): (
[0.100, 0.100, 0.090, 0.070, 0.070],
[0.34, 0.34, 0.34, 0.34, 0.34],
)
}
for target_year in (2016, 2019, 2020, 2021):
coverages[(target_year, SCENARIOS.alternative)] = coverages[(target_year, SCENARIOS.baseline)]
coverage_data = coverages[(year, scenario)]
coverage = pd.DataFrame({
models.TREATMENTS.isatuximab: coverage_data[0],
models.TREATMENTS.daratumumab: coverage_data[1],
}, index=TREATMENT_LINES)
coverage[models.TREATMENTS.residual] = 1 - coverage.sum(axis=1)
return coverage
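# Draw PFS and OS hazard ratios for every (disease state, treatment, retreated) key. A single
# survival percentile per key, seeded deterministically from the key and the draw number,
# keeps the PFS and OS draws correlated with each other.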
def make_hazard_ratios(draw: int, pfs: dict, os: dict):
index_cols = [models.MULTIPLE_MYELOMA_MODEL_NAME, 'multiple_myeloma_treatment', 'retreated']
pfs_hazard_ratio = pd.DataFrame(columns=index_cols + ['hazard_ratio']).set_index(index_cols)
os_hazard_ratio = pd.DataFrame(columns=index_cols + ['hazard_ratio']).set_index(index_cols)
pfs_hazard_ratio.loc[(models.SUSCEPTIBLE_STATE_NAME, models.TREATMENTS.not_treated, False)] = 1.0
os_hazard_ratio.loc[(models.SUSCEPTIBLE_STATE_NAME, models.TREATMENTS.not_treated, False)] = 1.0
for key in pfs:
random_seed = '_'.join([str(k) for k in key] + [str(draw)])
rs = np.random.RandomState(get_hash(random_seed))
survival_percentile = rs.random()
pfs_hazard_ratio.loc[key] = LogNormalHazardRate(*pfs[key]).get_random_variable(survival_percentile)
os_hazard_ratio.loc[key] = LogNormalHazardRate(*os[key]).get_random_variable(survival_percentile)
for key in set(os).difference(pfs):
random_seed = '_'.join([str(k) for k in key] + [str(draw)])
rs = np.random.RandomState(get_hash(random_seed))
survival_percentile = rs.random()
os_hazard_ratio.loc[key] = LogNormalHazardRate(*os[key]).get_random_variable(survival_percentile)
pfs_hazard_ratio = pfs_hazard_ratio.reset_index()
os_hazard_ratio = os_hazard_ratio.reset_index()
# FIXME: Super-duper hack to make lookup table work. Need at least one continuous parameter.
pfs_hazard_ratio['year_start'] = 1990
pfs_hazard_ratio['year_end'] = 2100
os_hazard_ratio['year_start'] = 1990
os_hazard_ratio['year_end'] = 2100
return pfs_hazard_ratio, os_hazard_ratio
class MultipleMyelomaTreatmentCoverage:
configuration_defaults = {
'mm_scenarios': {
'mm_treatment_scenario': SCENARIOS.baseline,
}
}
@property
def name(self):
return self.__class__.__name__
def __init__(self):
self.configuration_defaults = {
'mm_scenarios': {
'mm_treatment_scenario': MultipleMyelomaTreatmentCoverage.configuration_defaults[
'mm_scenarios']['mm_treatment_scenario'],
}
}
# noinspection PyAttributeOutsideInit
def setup(self, builder: 'Builder') -> None:
self.clock = builder.time.clock()
self.randomness = builder.randomness.get_stream(self.name)
scenario = builder.configuration.mm_scenarios.mm_treatment_scenario
assert scenario in SCENARIOS
self.coverage = {}
for year in (2016, 2019, 2020, 2021, 2025):
self.coverage[year] = make_treatment_coverage(year, scenario)
# What treatment are they currently on.
self.treatment_column = 'multiple_myeloma_treatment'
# Did they previously receive isatuximab or daratumumab
self.ever_isa_or_dara_column = 'ever_isa_or_dara'
self.retreated_column = 'retreated'
# registry_evaluation_status has 3 potential values: unevaluated, eligible, enrolled
self.registry_evaluation_status = 'registry_evaluation_status'
self.registry_evaluation_date = 'registry_evaluation_date'
self.ever_isa = 'ever_isa'
self.registry_start_date = pd.Timestamp('2021-01-01')
columns_created = [
self.treatment_column,
self.ever_isa_or_dara_column,
self.retreated_column,
self.registry_evaluation_status,
self.registry_evaluation_date,
self.ever_isa
]
columns_required = (
[models.MULTIPLE_MYELOMA_MODEL_NAME]
+ [f'{s}_event_time' for s in models.MULTIPLE_MYELOMA_WITH_CONDITION_STATES]
)
self.population_view = builder.population.get_view(columns_required + columns_created)
builder.population.initializes_simulants(
self.on_initialize_simulants,
creates_columns=columns_created,
requires_columns=columns_required,
requires_streams=[self.randomness.key],
)
builder.event.register_listener('time_step__cleanup', self.on_time_step_cleanup)
def on_initialize_simulants(self, pop_data: 'SimulantData') -> None:
current_coverage = self.get_current_coverage(pop_data.creation_time)
initial_mm_state = self.population_view.subview([models.MULTIPLE_MYELOMA_MODEL_NAME]).get(pop_data.index)
pop_update = pd.DataFrame({
self.treatment_column: models.TREATMENTS.not_treated,
self.ever_isa_or_dara_column: False,
self.retreated_column: False,
self.registry_evaluation_status: 'unevaluated',
self.registry_evaluation_date: pd.NaT,
self.ever_isa: False
}, index=pop_data.index)
with_mm = initial_mm_state.loc[
initial_mm_state[models.MULTIPLE_MYELOMA_MODEL_NAME] != models.SUSCEPTIBLE_STATE_NAME,
models.MULTIPLE_MYELOMA_MODEL_NAME
]
pop_update.loc[with_mm.index, self.treatment_column] = models.TREATMENTS.residual
self.population_view.update(pop_update)
def on_time_step_cleanup(self, event: 'Event'):
pop = self.population_view.get(event.index)
retreat_mask = self.randomness.get_draw(pop.index, additional_key='retreat') < PROBABILITY_RETREAT
ever_isa_or_dara = pop[self.ever_isa_or_dara_column].copy()
ever_isa = pop[self.ever_isa].copy()
registry_eligible = pd.Series(False, index=pop.index)
registry_mask = self.randomness.get_draw(pop.index, additional_key='registry') < REGISTRY_ENROLL_PROBABILITY
proportion_ever_isa_or_dara = 0
coverage = self.get_current_coverage(event.time)
lines = TREATMENT_LINES.tolist()
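        # Walk the treatment lines in order. Simulants entering a line on this time step are
        # split into: naive (never received isa/dara), retreat (prior exposure, retreated this
        # line), and no-retreat (prior exposure, assigned the residual treatment).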
for current_line, previous_line in zip(lines, [None] + lines[:-1]):
# First, unpack probabilities for the current and previous line.
p_isa = coverage.at[current_line, models.TREATMENTS.isatuximab]
p_dara = coverage.at[current_line, models.TREATMENTS.daratumumab]
p_resid = coverage.at[current_line, models.TREATMENTS.residual]
# Our base filter, which we'll partition.
new_treatment_line = pop[f'{current_line}_event_time'] == event.time
# First group, never had isa/dara
naive = new_treatment_line & ~ever_isa_or_dara
naive_choices = [models.TREATMENTS.isatuximab, models.TREATMENTS.daratumumab, models.TREATMENTS.residual]
rescale_probabilities = lambda p1, p2, e: (p1 - e * PROBABILITY_RETREAT * p1 / (p1 + p2)) / (1 - e)
if p_isa + p_dara:
p_isa_naive = rescale_probabilities(p_isa, p_dara, proportion_ever_isa_or_dara)
p_dara_naive = rescale_probabilities(p_dara, p_isa, proportion_ever_isa_or_dara)
else:
p_isa_naive = p_isa
p_dara_naive = p_dara
            if p_isa_naive + p_dara_naive > 1:
                # Normalize with the combined probability captured before either value is overwritten.
                naive_total = p_isa_naive + p_dara_naive
                p_isa_naive = p_isa_naive / naive_total
                p_dara_naive = p_dara_naive / naive_total
p_resid_naive = 1 - p_isa_naive - p_dara_naive
naive_probs = [p_isa_naive, p_dara_naive, p_resid_naive]
pop.loc[naive, self.treatment_column] = self.randomness.choice(
pop.loc[naive].index,
choices=naive_choices,
p=naive_probs,
)
isa_or_dara = pop[self.treatment_column].isin([
models.TREATMENTS.isatuximab, models.TREATMENTS.daratumumab
])
isa = pop[self.treatment_column] == models.TREATMENTS.isatuximab
pop.loc[naive & isa, self.ever_isa] = True
pop.loc[naive & isa_or_dara, self.ever_isa_or_dara_column] = True
# These are no-ops. Here for clarity.
pop.loc[naive & ~isa_or_dara, self.ever_isa_or_dara_column] = False
pop.loc[naive, self.retreated_column] = False
# ever_x = (1 - PROBABILITY_RETREAT) * ever_x-1 + coverage_x
proportion_ever_isa_or_dara = (1 - PROBABILITY_RETREAT) * proportion_ever_isa_or_dara + p_isa + p_dara
# Second group, simulants w/prior exposure to isa/dara, and will be retreated this line
retreat = new_treatment_line & ever_isa_or_dara & retreat_mask
retreat_choices = [models.TREATMENTS.isatuximab, models.TREATMENTS.daratumumab]
if p_isa + p_dara:
retreat_probs = [p_isa / (p_isa + p_dara), p_dara / (p_isa + p_dara)]
else:
retreat_probs = [p_isa, p_dara]
pop.loc[retreat, self.treatment_column] = self.randomness.choice(
pop.loc[retreat].index,
choices=retreat_choices,
p=retreat_probs,
)
pop.loc[retreat, self.ever_isa_or_dara_column] = True # This is a no-op. Here for clarity.
pop.loc[retreat, self.retreated_column] = True
# Third group, got 1 dose of isa/dara, but won't receive one this line, may receive again
no_retreat = new_treatment_line & ever_isa_or_dara & ~retreat_mask
pop.loc[no_retreat, self.treatment_column] = models.TREATMENTS.residual
# pop.loc[no_retreat, ever_isa_or_dara] does not change
# pop.loc[no_retreat, retreated] does not change
# Build registry mask
if self.registry_start_date <= event.time:
registry_eligible = registry_eligible | (~ever_isa & isa)
if self.registry_start_date <= event.time:
pop.loc[registry_eligible & registry_mask, self.registry_evaluation_status] = 'enrolled'
pop.loc[registry_eligible, self.registry_evaluation_date] = event.time
pop.loc[registry_eligible & ~registry_mask, self.registry_evaluation_status] = 'eligible'
self.population_view.update(pop)
def get_current_coverage(self, time: pd.Timestamp) -> pd.DataFrame:
"""Get a df with columns: [TREATMENTS.isatuximab, TREATMENTS.daratumumab, TREATMENTS.residual]
indexed by multiple myeloma state."""
if time.year < 2016:
return self.coverage[2016]
elif time.year < 2019:
upper_year = 2019
lower_year = 2016
elif time.year < 2020:
upper_year = 2020
lower_year = 2019
elif time.year < 2021:
upper_year = 2021
lower_year = 2020
elif time.year < 2025:
upper_year = 2025
lower_year = 2021
elif time.year >= 2025:
return self.coverage[2025]
        t = (time - pd.Timestamp(f'{lower_year}-01-01')) / (pd.Timestamp(f'{upper_year}-01-01')  # api: pandas.Timestamp
#%%
# Let's import some packages
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
from sklearn.metrics import r2_score
# special matplotlib argument for improved plots
from matplotlib import rcParams
# Pretty display for notebooks
#%matplotlib inline
#%%
# Let's load data
boston = load_boston()
#%%
# Display what is in dataste
print(boston.keys())
#%%
# Display column names
print(boston.DESCR)
#%%
data = pd.DataFrame(boston.data)  # api: pandas.DataFrame
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from matplotlib.widgets import Slider, Button
import numpy as np
import matplotlib.pyplot as plt
from cell_cycle_gating import cellcycle_phases as cc
from cell_cycle_gating import dead_cell_filter as dcf
from cell_cycle_gating import ph3_filter as pf
import pandas as pd
import re
from ipywidgets import interactive, interact, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display
import os
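# reevaluate_phases assigns each cell a numeric phase code from the DNA/EdU gate positions
# (0.5 subG1, 1 G1, 2 S, 2.1 S dropout, 3 G2, 3.1 beyond G2) and returns the fraction of
# cells in each phase along with the per-cell codes.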
def reevaluate_phases(log_dna, dna_gates, log_edu, edu_gates):
cell_id = (1 * ((log_dna > dna_gates[0]) & # G1
(log_dna < dna_gates[1]) &
(log_edu < edu_gates[0])) +
2 * ((log_dna >= dna_gates[0]) & # S
(log_dna < dna_gates[3]) &
(log_edu >= edu_gates[0])) +
#(log_edu < edu_gates[1])) +
2.1 * ((log_dna >= dna_gates[1]) # S dropout
& (log_dna < dna_gates[2]) &
(log_edu < edu_gates[0])) +
3 * ((log_dna >= dna_gates[2]) & # G2
(log_dna < dna_gates[3]) &
(log_edu < edu_gates[0])) +
0.5 * (log_dna < dna_gates[0]) +
3.1 * (log_dna > dna_gates[3]))
fractions = {}
for state, val in zip(['subG1', 'G1', 'S', 'S_dropout', 'G2', 'beyondG2'],
[0.5, 1, 2, 2.1, 3, 3.1]):
fractions[state] = np.mean(cell_id == (val % 4))
return fractions, cell_id
def update_gating(obj, well, ndict,
ldr_channel=True, ph3_channel=True,
x_dna=None, px_edu=None, x_ldr=None, system=None):
if os.path.isdir(obj):
obj_file = get_obj_file(obj, well)
path2file = "%s/%s" % (obj, obj_file)
df = pd.read_table(path2file)
df = map_channel_names(df, ndict)
well = re.search('result.(.*?)\[', obj_file).group(1)
well = "%s%s" % (well[0], well[1:].zfill(2))
else:
dfo = pd.read_table(obj)
dfo = dfo.rename(columns=ndict)
df = dfo[dfo.well == well].copy()
edu = np.array(df['edu'].tolist())
dna = np.array(df['dna'].tolist())
edu_notnan = ~np.isnan(edu)
edu = edu[edu_notnan]
dna = dna[edu_notnan]
if ldr_channel:
ldr = np.array(df['ldr'].tolist())
ldr = ldr[edu_notnan]
if system == 'ixm':
x_ldr = np.arange(500, ldr.max(), 100)
ldr_gates = dcf.get_ldrgates(ldr, x_ldr)
dna_gates = dcf.get_dna_gating(dna, ldr, ldr_gates)
cell_fate_dict, outcome = dcf.live_dead(ldr, ldr_gates, dna, dna_gates, x_ldr=x_ldr)
live_cols = [s for s in list(cell_fate_dict.keys()) if 'alive' in s]
dead_cols = [s for s in list(cell_fate_dict.keys()) if 'dead' in s]
a = 0
d = 0
for col in live_cols:
a += cell_fate_dict[col]
for col in dead_cols:
d += cell_fate_dict[col]
else:
outcome = np.array([1] * len(dna))
if ph3_channel:
#ph3 = np.array(df['Nuclei Selected - pH3INT'].tolist())
ph3 = np.array(df['ph3'].tolist())
ph3 = ph3[edu_notnan]
ph3 = ph3[outcome>=1]
if px_edu is None:
px_edu = np.arange(-0.2, 5.3, .02)
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna = cc.compute_log_dna(dna[outcome>=1], x_dna)
edu_shift, offset_edu, edu_g1_max, edu_s_min = cc.get_edu_gates(edu[outcome>=1], px_edu)
log_edu = cc.compute_log_edu(edu[outcome>=1], px_edu, offset_edu)
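    # Build interactive gate sliders seeded near the median log-DNA / log-EdU values; the
    # user drags four DNA gates and two EdU gates, and apply_gating() later recomputes the
    # phase fractions from y.kwargs.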
y=interactive(gating,
log_dna=fixed(log_dna),
g1_left= np.median(log_dna)-.25,
g1_right = np.median(log_dna) - 0.15,
g2_left= np.median(log_dna) + 0.15,
g2_right = np.median(log_dna) +.25,
log_edu = fixed(log_edu),
edu_lower = np.median(log_edu),
edu_upper = np.median(log_edu) + 0.5)
for i, child in enumerate(y.children):
child.step = 0.05
if i <=3:
child.min = log_dna.min()
child.max = log_dna.max()
else:
child.min = log_edu.min()
child.max = log_edu.max()
return y
def gating(log_dna, log_edu,
g1_left, g1_right, g2_left, g2_right, edu_lower, edu_upper):
dna_gates = [g1_left, g1_right, g2_left, g2_right]
edu_gates = [edu_lower, edu_upper]
xy = np.vstack([log_dna, log_edu])
z = cc.gaussian_kde(xy)(xy)
fig, ax = plt.subplots()
plt.subplots_adjust(bottom=0.5)
plt.scatter(log_dna, log_edu, c=z, s=2, rasterized=True)
l, = plt.plot([dna_gates[i] for i in [0, 0, 3, 3, 0, 0, 1, 1, 0, 2, 2, 3]],
[-1, edu_gates[1], edu_gates[1], -1,
np.nan, edu_gates[0], edu_gates[0],
-1, np.nan, -1, edu_gates[0], edu_gates[0]],
'--', color='red')
plt.xlabel('log10 (DNA content)')
plt.ylabel('log10 (EdU)')
plt.xlim((log_dna.min(), log_dna.max()))
plt.ylim((log_edu.min(), log_edu.max()))
#plt.pie(fractions.values(), labels=fractions.keys())
plt.show()
def apply_gating(y, obj, well, ndict,
ldr_channel=True, ph3_channel=True,
x_dna=None, px_edu=None, x_ldr=None, system=None):
if os.path.isdir(obj):
obj_file = get_obj_file(obj, well)
path2file = "%s/%s" % (obj, obj_file)
df = pd.read_table(path2file)
df = map_channel_names(df, ndict)
well = re.search('result.(.*?)\[', obj_file).group(1)
well = "%s%s" % (well[0], well[1:].zfill(2))
else:
dfo = pd.read_table(obj)
dfo = dfo.rename(columns=ndict)
df = dfo[dfo.well == well].copy()
edu = np.array(df['edu'].tolist())
dna = np.array(df['dna'].tolist())
edu_notnan = ~np.isnan(edu)
edu = edu[edu_notnan]
dna = dna[edu_notnan]
if ldr_channel:
ldr = np.array(df['ldr'].tolist())
ldr = ldr[edu_notnan]
if system == 'ixm':
x_ldr = np.arange(500, ldr.max(), 100)
ldr_gates = dcf.get_ldrgates(ldr, x_ldr)
dna_gates = dcf.get_dna_gating(dna, ldr, ldr_gates)
cell_fate_dict, outcome = dcf.live_dead(ldr, ldr_gates, dna, dna_gates, x_ldr=x_ldr)
live_cols = [s for s in list(cell_fate_dict.keys()) if 'alive' in s]
dead_cols = [s for s in list(cell_fate_dict.keys()) if 'dead' in s]
a = 0
d = 0
for col in live_cols:
a += cell_fate_dict[col]
for col in dead_cols:
d += cell_fate_dict[col]
else:
outcome = np.array([1] * len(dna))
if ph3_channel:
#ph3 = np.array(df['Nuclei Selected - pH3INT'].tolist())
ph3 = np.array(df['ph3'].tolist())
ph3 = ph3[edu_notnan]
if px_edu is None:
px_edu = np.arange(-0.2, 5.3, .02)
if x_dna is None:
x_dna = np.arange(2.5, 8, 0.02)
log_dna = cc.compute_log_dna(dna[outcome>=1], x_dna)
edu_shift, offset_edu, edu_g1_max, edu_s_min = cc.get_edu_gates(edu[outcome>=1], px_edu)
log_edu = cc.compute_log_edu(edu[outcome>=1], px_edu, offset_edu)
ndna_gates = [y.kwargs['g1_left'], y.kwargs['g1_right'], y.kwargs['g2_left'], y.kwargs['g2_right']]
nedu_gates = [y.kwargs['edu_lower'], y.kwargs['edu_upper']]
fractions, cell_id = reevaluate_phases(log_dna, ndna_gates, log_edu, nedu_gates)
if ph3_channel:
f_ph3, ph3_cutoff, ph3_lims = pf.get_ph3_gates(ph3[outcome>=1], cell_id)
log_ph3 = pf.compute_log_ph3(ph3[outcome>=1])
fractions, cell_id = pf.evaluate_Mphase(log_ph3, ph3_cutoff, cell_id)
if ldr_channel:
fractions['cell_count'] = a
fractions['cell_count__dead'] = d
fractions['cell_count__total'] = len(dna)
results_file = "results/summary_%s.csv" % obj.split('.txt')[0]
    dfs = pd.read_csv(results_file)  # api: pandas.read_csv
import requests
import json
import numpy as np
import pandas as pd
import sqlalchemy as sql
import time
from scripts.config import *
from sqlalchemy import create_engine
from utils.logger import logger
from typing import *
SEARCH = "https://api.twitter.com/2/tweets/search/all"
class DB:
@property
def location(self) -> str:
return 'sqlite:///{path}.db'
@classmethod
def delete(cls, table_name: str, path: str) -> None:
query = sql.text("DROP TABLE IF EXISTS {table_name}".format(table_name=table_name))
engine = create_engine(DB().location.format(path=path), echo=False)
logger.info('Deleting table {table_name}'.format(table_name=table_name))
engine.execute(query)
@classmethod
def fetch(cls, table_name: str, path: str) -> pd.DataFrame:
engine = create_engine(DB().location.format(path=path), echo=False)
df = pd.DataFrame(engine.execute("SELECT * FROM {table_name}".format(table_name=table_name)).fetchall())
if table_name=='users':
header_dict_user = {0:"created_at",1:"name",2:"screen_name",3:"description",
4:"id",5:"location",
6:"followers_count",7:"following_count",8:"listed_count",9:"tweet_count"}
df = df.rename(header_dict_user,axis = 1)
elif table_name=='user_tweets':
header_dict_user_tweets = {0:"created_at",1:"screen_name",2:'text',3:'lang',
4:'retweet_count',5:'reply_count',6:'like_count', 7:"quote_count",
8:"id",9:"author_id",10:"conversation_id",11:"in_reply_to_user_id",12:"geo"}
df = df.rename(header_dict_user_tweets,axis = 1)
elif table_name == 'keyword_tweets':
header_dict_keyword_tweets = {0:"created_at",1:"screen_name",2:'text',3:'lang',
4:'retweet_count',5:'reply_count',6:'like_count', 7:"quote_count",
8:"id",9:"author_id",10:"conversation_id",11:"in_reply_to_user_id",12:"geo",
13:'entities'}
df = df.rename(header_dict_keyword_tweets,axis = 1)
return df
@classmethod
def write(cls, table_name: str, data: pd.DataFrame, path: str) -> None:
engine = create_engine(DB().location.format(path=path), echo=False)
logger.info('Writing {rows} rows to table {table}'.format(rows=len(data), table=table_name))
if data.index[0] != 0:
data = data.reset_index()
data.to_sql(table_name, con=engine, if_exists='append', index=False)
class Follow:
def __init__(self):
self.sleep_time = 15
self.url = 'https://api.twitter.com/2/users/{user_id}/{kind}'
@classmethod
def create_headers(cls, bearer_token):
headers = {"Authorization": "Bearer {}".format(bearer_token), 'User-Agent': 'v2FollowingLookupPython'}
return headers
@classmethod
def custom_params(cls):
return {"user.fields": "created_at", 'max_results': 5}
@classmethod
def _fetch(cls, user_id: str, kind: str = 'following', target_total: int = 100,
token_number: int = 0) -> pd.DataFrame:
"""
        Page through the Twitter v2 follow endpoint for `user_id` until roughly
        `target_total` accounts are collected, sleeping out HTTP 429 rate limits and
        following meta['next_token'] between pages.
"""
url = Follow().url.format(user_id=user_id, kind=kind)
headers = Follow.create_headers(bearer_token=eval('BEARER{token}'.format(token=token_number)))
params = Follow.custom_params()
counter, results = 0, []
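        # Page until roughly target_total accounts are collected: sleep out HTTP 429 rate
        # limits, retry other non-200 responses, and follow meta['next_token'] between pages.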
while counter < target_total:
response = requests.request("GET", url, headers=headers, params=params)
if response.status_code == 429:
logger.info('Sleeping')
time.sleep(int(60 * Follow().sleep_time))
continue
if response.status_code != 200:
continue
data = json.loads(response.text)
if not 'data' in data.keys():
break
data, meta = pd.DataFrame(data['data']), data['meta']
counter += len(data)
if isinstance(data, pd.DataFrame):
results.append(data)
if not 'next_token' in list(meta.keys()):
break
else:
params['next_token'] = meta['next_token']
if len(results) > 0:
return pd.concat(results, axis=0)
        return pd.DataFrame({'username': ''}, index=[0])  # api: pandas.DataFrame
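# Example usage (sketch only; the user id, table name and db path below are placeholders,
# not values defined in this module):
#   followers = Follow._fetch(user_id="2244994945", kind="followers", target_total=200)
#   DB.write("followers", followers, path="twitter")
#   stored = DB.fetch("followers", path="twitter")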
from logging import root
import os
import pandas as pd
import mysql.connector
from query import Query
class findDifferenceBetweenSalaries:
def __init__(self):
self.mydb = mysql.connector.connect(
host="xxxxxxxxxx",
user="xxxxx",
password="<PASSWORD>",
port="xxxx",
database="xxxxx"
)
def execute(self):
employee_df = pd.read_sql(Query.EMPLOYEE_TABLE, self.mydb)
        departments_df = pd.read_sql(Query.DEPARTMENTS_TABLE, self.mydb)  # api: pandas.read_sql
"""Main module."""
import math
import os
import sys
import datetime
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# print("matplotlib=", matplotlib.rcParams.keys())
matplotlib.rcParams['text.usetex'] = True
# matplotlib.rcParams['text.latex.unicode'] = True key not available
import magnetdata
def list_sequence(lst, seq):
"""Return sequences of seq in lst"""
sequences = []
count = 0
len_seq = len(seq)
upper_bound = len(lst)-len_seq+1
for i in range(upper_bound):
if lst[i:i+len_seq] == seq:
count += 1
sequences.append([i,i+len_seq])
return sequences
# see: https://stackoverflow.com/questions/5419204/index-of-duplicates-items-in-a-python-list
#from collections import defaultdict
def list_duplicates_of(seq,item):
"""Return sequences of duplicate adjacent item in seq"""
start_at = -1
locs = []
sequences = []
start_index = -1
while True:
try:
loc = seq.index(item,start_at+1)
        except ValueError:
            if locs:
                end_index = locs[-1]
                sequences.append([start_index, end_index])
                # print("break end_index=%d" % end_index)
            break
else:
if not locs:
# seq=[loc,0]
start_index = loc
# print( "item=%d, start: %d" % (item, loc) )
else:
if (loc-locs[-1]) != 1:
end_index = locs[-1]
sequences.append([start_index, end_index])
start_index = loc
# print( "item=%d, end: %d, new_start: %d" % (item, locs[-1], loc) )
locs.append(loc)
start_at = loc
return sequences #locs
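# Small illustration of list_duplicates_of (not executed at import time): the value 1
# appears in two adjacent runs in the list below, covering indices 0..1 and 3..5.
def _example_list_duplicates_of():
    seq = [1, 1, 2, 1, 1, 1, 3]
    # returns [[0, 1], [3, 5]]
    return list_duplicates_of(seq, 1)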
class MagnetRun:
"""
Magnet Run
Site: name of the site
Insert: list of the MagnetIDs composing the insert
MagnetData: pandas dataframe or tdms file
"""
def __init__(self, site="unknown", insert="", data=None):
"""default constructor"""
self.Site = site
self.Insert = insert
self.MagnetData = data
start_date = None
try:
if "Date" in self.MagnetData.getKeys() and "Time" in self.MagnetData.getKeys():
start_date=self.MagnetData.getData("Date").iloc[0]
start_time=self.MagnetData.getData("Time").iloc[0]
end_date=self.MagnetData.getData("Date").iloc[-1]
end_time = self.MagnetData.getData('Time').iloc[-1]
tformat="%Y.%m.%d %H:%M:%S"
t0 = datetime.datetime.strptime(start_date+" "+start_time, tformat)
t1 = datetime.datetime.strptime(end_date+" "+end_time, tformat)
dt = (t1-t0)
duration = dt / datetime.timedelta(seconds=1)
print("* Site: %s, Insert: %s" % (self.Site, self.Insert),
"start_date=%s" % start_date,
"start_time=%s" % start_time,
"duration=%g s" % duration)
if self.MagnetData.Type == 0:
if self.Site == "M9":
self.MagnetData.addData("IH", "IH = Idcct1 + Idcct2")
self.MagnetData.addData("IB", "IB = Idcct3 + Idcct4")
elif self.Site in ["M8", "M10"]:
self.MagnetData.addData("IH", "IH = Idcct3 + Idcct4")
self.MagnetData.addData("IB", "IB = Idcct1 + Idcct2")
# what about M1, M5 and M7???
except:
print("MagnetRun.__init__: trouble loading data")
try:
file_name = "%s_%s_%s-wrongdata.txt" % (self.Site, self.Insert,start_date)
self.MagnetData.to_csv(file_name, sep=str('\t'), index=False, header=True)
except:
print("MagnetRun.__init__: trouble loading data - fail to save csv file")
pass
pass
@classmethod
def fromtxt(cls, site, filename):
"""create from a txt file"""
with open(filename, 'r') as f:
insert=f.readline().split()[-1]
data = magnetdata.MagnetData.fromtxt(filename)
# print("magnetrun.fromtxt: data=", data)
return cls(site, insert, data)
@classmethod
def fromcsv(cls, site, insert, filename):
"""create from a csv file"""
data = magnetdata.MagnetData.fromcsv(filename)
return cls(site, insert, data)
@classmethod
def fromStringIO(cls, site, name):
"""create from a stringIO"""
from io import StringIO
# try:
ioname = StringIO(name)
# TODO rework: get item 2 otherwise set to unknown
insert = "Unknown"
headers = ioname.readline().split()
if len(headers) >=2:
insert = headers[1]
data = magnetdata.MagnetData.fromStringIO(name)
# except:
# print("cannot read data for %s insert, %s site" % (insert, site) )
# fo = open("wrongdata.txt", "w", newline='\n')
# fo.write(ioname)
# fo.close()
# sys.exit(1)
return cls(site, insert, data)
def __repr__(self):
return "%s(Site=%r, Insert=%r, MagnetData=%r)" % \
(self.__class__.__name__,
self.Site,
self.Insert,
self.MagnetData)
def getSite(self):
"""returns Site"""
return self.Site
def getInsert(self):
"""returns Insert"""
return self.Insert
def setSite(self, site):
"""set Site"""
self.Site = site
def getType(self):
"""returns Data Type"""
return self.MagnetData.Type
def getMData(self):
"""return Magnet Data object"""
return self.MagnetData
def getData(self, key=""):
"""return Data"""
return self.MagnetData.getData(key)
def getKeys(self):
"""return list of Data keys"""
return self.MagnetData.Keys
def getDuration(self):
"""compute duration of the run in seconds"""
duration = None
if "Date" in self.MagnetData.getKeys() and "Time" in self.MagnetData.getKeys():
start_date=self.MagnetData.getData("Date").iloc[0]
start_time=self.MagnetData.getData("Time").iloc[0]
end_date=self.MagnetData.getData("Date").iloc[-1]
end_time = self.MagnetData.getData('Time').iloc[-1]
tformat="%Y.%m.%d %H:%M:%S"
t0 = datetime.datetime.strptime(start_date+" "+start_time, tformat)
t1 = datetime.datetime.strptime(end_date+" "+end_time, tformat)
dt = (t1-t0)
duration = dt / datetime.timedelta(seconds=1)
return duration
def stats(self):
"""compute stats from the actual run"""
# TODO:
# add teb,... to list
# add duration
# add duration per Field above certain values
# add \int Power over time
from tabulate import tabulate
# see https://github.com/astanin/python-tabulate for tablefmt
print("Statistics:\n")
tables = []
headers = ["Name", "Mean", "Max", "Min", "Std", "Median", "Mode"]
for (f,unit) in zip(['Field', 'Pmagnet', 'teb', 'debitbrut'],["T", "MW", "C","m\u00B3/h"]):
v_min = float(self.MagnetData.getData(f).min())
v_max = float(self.MagnetData.getData(f).max())
v_mean = float(self.MagnetData.getData(f).mean())
v_var = float(self.MagnetData.getData(f).var())
v_median = float(self.MagnetData.getData(f).median())
v_mode = float(self.MagnetData.getData(f).mode())
table = ["%s[%s]" % (f,unit), v_mean, v_max, v_min, math.sqrt(v_var), v_median, v_mode]
tables.append(table)
print(tabulate(tables, headers, tablefmt="simple"), "\n")
return 0
def plateaus(self, twindows=6, threshold=1.e-4, b_threshold=1.e-3, duration=5, show=False, save=True, debug=False):
"""get plateaus, pics from the actual run"""
# TODO:
        # pass b_threshold as input param
        # b_threshold = 1.e-3
if debug:
print("Search for plateaux:", "Type:", self.MagnetData.Type)
B_min = float(self.MagnetData.getData('Field').min())
B_max = float(self.MagnetData.getData('Field').max())
B_mean = float(self.MagnetData.getData('Field').mean())
B_var = float(self.MagnetData.getData('Field').var())
Bz = self.MagnetData.getData('Field')
regime = Bz.to_numpy()
df_ = pd.DataFrame(regime)
df_['regime']=pd.Series(regime)
diff = np.diff(regime) # scale by B_max??
df_['diff']=pd.Series(diff)
ndiff = np.where(abs(diff) >= threshold, diff, 0)
df_['ndiff']= | pd.Series(ndiff) | pandas.Series |
import pandas as pd
from pathlib import Path
# import matplotlib.pyplot as plt
# added by Pierre
import matplotlib as mpl
mpl.use('TkAgg') # or whatever other backend that you want
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import os
import argparse
from stable_baselines import results_plotter
from stable_baselines.results_plotter import load_results, ts2xy
def moving_average(values, window):
"""
Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
:return: (numpy array)
"""
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
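# Small illustration of moving_average (not executed at import time): a window of 2
# averages each pair of consecutive values, so the output is one element shorter
# than the input.
def _example_moving_average():
    values = np.array([1.0, 2.0, 3.0, 4.0])
    # returns array([1.5, 2.5, 3.5])
    return moving_average(values, window=2)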
def plot_results(log_folder, type_str, leg_label):
"""
plot the results
:param log_folder: (str) the save location of the results to plot
:param type: (str) either 'timesteps', 'episodes' or 'walltime_hrs'
"""
x, y = ts2xy(load_results(log_folder), type_str)
y = moving_average(y, window=50)
# Truncate x
x = x[len(x) - len(y):]
# plt.figure()
# plt.plot(x, y, label=leg_label)
# plt.xlabel(type_str)
plt.ylabel('Rewards')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')
parser.add_argument('-e', '--env', help='env name', type=str)
args = parser.parse_args()
env_id = args.env
log_dir = args.folder
print(log_dir)
###############
# METRICS
###############
# Get the mean of the reward and wall train time of all the seed runs in the experiment
res_file_list = []
for path in Path(log_dir).rglob('stats.csv'):
# print(path)
res_file_list.append(path)
res_file_list = sorted(res_file_list)
# print(res_file_list)
li = []
count = 0
for filename in res_file_list:
df = | pd.read_csv(filename, index_col=None, header=0) | pandas.read_csv |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Dict, List
import numpy as np
import pandas as pd
from collections import Counter
from src.compress import compress
# Logger
from src.logger_setting.my_logger import get_logger
from src.setting import setting
LOGGER = get_logger()
def groupby_calc(df):
df['esn'] = df['esn'].astype('str')
df = df.groupby(['esn'])
return df
def calc_total(series):
series = series.values
count = 0
for d in range(len(series)):
if d < len(series) - 1:
if pd.isna(series[d]) or pd.isna(series[d + 1]):
continue
if float(series[d]) <= float(series[d + 1]):
count += float(series[d + 1]) - float(series[d])
else:
count += float(series[d + 1])
return count
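# Small illustration of calc_total (not executed at import time): the counter drops
# from 15 to 3 between the second and third samples, which is treated as a counter
# reset, so only the post-reset value is added instead of a negative difference.
def _example_calc_total():
    series = pd.Series([10, 15, 3, 8])
    # (15 - 10) + 3 + (8 - 3) = 13
    return calc_total(series)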
def is_active(series):
series = calc_total(series)
if float(series) / setting.mb > 10:
return 1
else:
return 0
def get_max(series):
if series:
return np.max(series)
else:
return setting.INVALID_VALUE
def get_min(series):
if series:
return np.min(series)
else:
return setting.INVALID_VALUE
def get_avg(values, counts):
    count = sum(counts) if isinstance(counts, list) else counts
if count == 0:
return setting.INVALID_VALUE
else:
        return sum(values) / count if isinstance(values, list) else values / count
def get_avg_max_min(df, avg_name, max_name, min_name, counts):
avg_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[avg_name].values))
sum_value = get_sum(avg_list)
cnt = get_sum(list(df[counts].values))
avg = sum_value / cnt if cnt != 0 else setting.INVALID_VALUE
max_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[max_name].values))
max_value = get_max(max_list)
min_list = list(filter(lambda x: int(x) != setting.INVALID_VALUE, df[min_name].values))
min_value = get_min(min_list)
return {avg_name: avg,
max_name: max_value,
min_name: min_value}
def get_sum(series):
if series:
return np.sum(series)
else:
return setting.INVALID_VALUE
def get_std(series):
if series:
return np.std(series)
else:
return setting.INVALID_VALUE
def get_all_day():
all_day_file = compress.get_all_csv_file(os.path.join(setting.data_path, 'extractData'))
day_list = []
for file in all_day_file:
day_list.append(os.path.split(file)[1].split("\\")[-1].split('_')[0])
return list(set(day_list))
def merge_day_data(day_dict: Dict[str, List[str]]):
for day in day_dict.keys():
file_list: List[str] = day_dict.get(day)
df = pd.concat(pd.read_csv(file, error_bad_lines=False, index_col=False) for file in file_list)
df.columns = setting.parameter_json["extract_data_columns"]
df = df.sort_values('collectTime', ascending=True)
        # Replace -9999 with NaN; values that were empty in the source already became NaN when read in, so both empty values and -9999 end up as NaN
df = df.replace(setting.INVALID_VALUE, np.nan)
grouped = groupby_calc(df).agg(
MaxRSRP=pd.NamedAgg(column='RSRP', aggfunc=max),
MinRSRP=pd.NamedAgg(column='RSRP', aggfunc=min),
AvgRSRP=pd.NamedAgg(column='RSRP', aggfunc=sum),
CntRSRP=pd.NamedAgg(column='RSRP', aggfunc="count"),
MaxCQI=pd.NamedAgg(column='CQI', aggfunc=max),
MinCQI=pd.NamedAgg(column='CQI', aggfunc=min),
AvgCQI=pd.NamedAgg(column='CQI', aggfunc=sum),
CntCQI=pd.NamedAgg(column='CQI', aggfunc="count"),
MaxRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=max),
MinRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=min),
AvgRSRQ=pd.NamedAgg(column='RSRQ', aggfunc=sum),
CntRSRQ=pd.NamedAgg(column='RSRQ', aggfunc="count"),
MaxRSSI=pd.NamedAgg(column='RSSI', aggfunc=max),
MinRSSI=pd.NamedAgg(column='RSSI', aggfunc=min),
AvgRSSI=pd.NamedAgg(column='RSSI', aggfunc=sum),
CntRSSI=pd.NamedAgg(column='RSSI', aggfunc="count"),
MaxSINR=pd.NamedAgg(column='SINR', aggfunc=max),
MinSINR=pd.NamedAgg(column='SINR', aggfunc=min),
AvgSINR=pd.NamedAgg(column='SINR', aggfunc=sum),
CntSINR=pd.NamedAgg(column='SINR', aggfunc="count"),
TotalDownload= | pd.NamedAgg(column='TotalDownload', aggfunc=calc_total) | pandas.NamedAgg |
#%%
import re
import pandas as pd
#%%
# processing bilayer and monolayer energies
bilayers_filename = './R2/BilayersEnergies'
monolayers_filename = './R2/MonolayersEnergies'
with open(bilayers_filename, 'r') as ofile:
bi_lines = ofile.readlines()
with open(monolayers_filename, 'r') as ofile:
mo_lines = ofile.readlines()
# print(lines)
#%%
#parse the energes files
def energies_parse_lines(lines):
pattern0 = '(?P<Name>.*)(\/$)'
pattern1 = '(?P<Value>[+-]?[0-9]*[.]?[0-9]*)(?: eV$)'
results = []
#%%
for i in range(int(len(lines) / 2)):
print(i)
print(lines[i])
m0 = re.search(pattern0, lines[i])
if m0:
name = m0.group('Name')
print(lines[i + 1])
m1 = re.search(pattern1, lines[i + 1])
if m1:
value = m1.group('Value')
if m0 and m1:
results.append([name, float(value)])
df = pd.DataFrame(results, columns=['Name', 'Value'])
return df
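# %%
# Small illustration of energies_parse_lines (not executed at import time): each entry
# is expected as a directory-name line ending in '/' followed by an energy line ending
# in ' eV'; the name and value below are made up for illustration only.
def _example_energies_parse_lines():
    lines = ["MoS2-WS2/\n", "-1.23 eV\n"]
    # returns a one-row DataFrame with Name='MoS2-WS2' and Value=-1.23
    return energies_parse_lines(lines)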
# %%
bilayers_df = energies_parse_lines(bi_lines)
bilayers_df['Name'] = bilayers_df['Name'].apply(
lambda x: x.replace('-T-', '-T_')
if x.find('-T-') != -1
else x.replace('-', '_', 1))
mono_df = energies_parse_lines(mo_lines)
# %%
# process the count of monolayers in bilayer
# from file IE_validationm_set
countmonolayers_file = './R2/IE_validation_set'
with open(countmonolayers_file, 'r') as ofile:
countmonolayers_lines = ofile.readlines()
# %%
def counts_partse_lines(lines):
pattern0 = '(?P<Name>.*)(?:-selected.dat\n$)'
pattern1 = '(?P<m1_count>[0-9]*)[_](?P<m2_count>[0-9]*)(?:\n$)'
results = []
#%%
for i in range(int(len(lines) / 2)):
print(i)
print(lines[i])
m0 = re.search(pattern0, lines[i])
if m0:
name = m0.group('Name')
print(lines[i + 1])
m1 = re.search(pattern1, lines[i + 1])
if m1:
value1 = m1.group('m1_count')
value2 = m1.group('m2_count')
        if m0 and m1 and value1.isnumeric() and value2.isnumeric():
results.append([name, int(value1), int(value2)])
df = pd.DataFrame(results, columns=['Name', 'Count_m1', 'Count_m2'])
return df
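# %%
# Small illustration of counts_partse_lines (not executed at import time): each entry
# is a filename line ending in '-selected.dat' followed by a 'count_count' line; the
# name and counts below are made up for illustration only.
def _example_counts_parse_lines():
    lines = ["MoS2-WS2-selected.dat\n", "3_4\n"]
    # returns a one-row DataFrame with Name='MoS2-WS2', Count_m1=3, Count_m2=4
    return counts_partse_lines(lines)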
# %%
counts_df = counts_partse_lines(countmonolayers_lines)
counts_df['Name'] = counts_df['Name'].apply(
lambda x: x.replace('-T-', '-T_')
if x.find('-T-') != -1
else x.replace('-', '_', 1))
# %%
#%%
# combine bilyaer energies with counts of monolayers
#
df = bilayers_df
df[['monolayer1', 'monolayer2']] = df.Name.str.split('_', expand=True)
#%%
df = df.iloc[:,[0,2,3,1]].merge(counts_df, how='inner', on='Name')
# %%
# %%
df = df.merge(
    mono_df.rename(columns={'Name': 'monolayer1', 'Value': 'ie_m1'}),
    how='inner',
    on='monolayer1',
    suffixes=('_df', '_m1')) \
    .merge(
        mono_df.rename(columns={'Name': 'monolayer2', 'Value': 'ie_m2'}),
        how='inner',
        on='monolayer2',
        suffixes=('_df', '_m2'))
# %%
areas_df = | pd.read_csv('./R2/areas.csv') | pandas.read_csv |
from typing import List, Tuple
from datetime import datetime
from os import listdir
from os.path import join, isdir
import yaml
import geopy.distance
import pandas as pd
import xarray as xr
import xlrd
import numpy as np
import geopandas as gpd
from epippy.geographics import match_points_to_regions, get_nuts_shapes, get_natural_earth_shapes, \
replace_iso2_codes, convert_country_codes, revert_old_country_names, convert_old_country_names
from epippy.generation import get_powerplants, match_powerplants_to_regions
from epippy.generation.hydro import get_hydro_production
from epippy import data_path
import logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s %(asctime)s - %(message)s")
logger = logging.getLogger()
def read_runoff_data(resolution: float, timestamps: pd.DatetimeIndex) -> xr.Dataset:
"""
Reading runoff data.
Parameters
----------
resolution: float
Reanalysis data spatial resolution.
timestamps: pd.DatetimeIndex
Time horizon.
Returns
-------
runoff_dataset: xr.Dataset
"""
runoff_dir = f"{data_path}generation/hydro/source/ERA5/runoff/{resolution}"
assert isdir(runoff_dir), f"Error: No data found for resolution {resolution} (directory {runoff_dir} not found)."
runoff_files = [join(runoff_dir, fn) for fn in listdir(runoff_dir) if fn.endswith(".nc")]
runoff_dataset = xr.open_mfdataset(runoff_files, combine='by_coords')
runoff_dataset = runoff_dataset.stack(locations=('longitude', 'latitude'))
missing_ts = set(timestamps) - set(pd.to_datetime(runoff_dataset.time.values))
assert not missing_ts, f"Error: Data is not available for following timestamps: {sorted(list(missing_ts))}."
runoff_dataset = runoff_dataset.sel(time=timestamps)
# Add area to dataset
runoff_dataset = add_runoff_area(runoff_dataset, resolution)
return runoff_dataset
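# Minimal usage sketch (not called anywhere): load hourly runoff at a 0.5 degree
# resolution for the year 2018. The resolution and time horizon are placeholders and
# the corresponding ERA5 NetCDF files are assumed to be present under data_path.
def _example_read_runoff_data():
    timestamps = pd.date_range("2018-01-01", "2018-12-31 23:00", freq="H")
    return read_runoff_data(0.5, timestamps)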
def add_runoff_area(dataset: xr.Dataset, resolution: float) -> xr.Dataset:
"""
Computing cell area for each (lon, lat) pair.
Parameters
----------
dataset: xarray.Dataset
Contains runoff data, in this case expressed in m.
resolution: float
Runoff data spatial resolution.
Returns
-------
dataset: xr.Dataset
Same input dataset with 'area' variable added.
"""
# Get distance between two latitudes. Does not depend on the geo-location, thus an arbitrary point is considered.
p1 = (0.0, 0.0)
p2 = (p1[0] + resolution, 0.0)
dist_latitude = geopy.distance.distance(p1, p2).km
lon = dataset['longitude'].values
lat = dataset['latitude'].values
# Get vectors of 'bordering' longitudes.
lonplus = lon + resolution / 2
lonmin = lon - resolution / 2
# Initialize a zero-vector and compute distances between longitude pairs.
dist = np.zeros(len(lat))
for idx in np.arange(len(lat)):
dist[idx] = geopy.distance.distance((lat[idx], lonplus[idx]), (lat[idx], lonmin[idx])).km
# Compute cell area and attach it to the dataset.
dataset['area'] = ('locations', dist * dist_latitude)
return dataset
def get_phs_storage_capacities(phs_capacity_df: pd.DataFrame, default_phs_duration: float) -> pd.DataFrame:
"""
Assigning storage capacities (MWh) to PHS plants.
Parameters
----------
phs_capacity_df: pd.Series
Series containing PHS rated power data (indexed by NUTS code).
default_phs_duration: float
Default duration for PHS plants, in case data does not exist for plant or country.
Returns
-------
phs_cap_storage: pd.DataFrame
Frame containing PHS power and energy ratings.
"""
phs_geth_fn = f"{data_path}generation/hydro/source/Geth_2015_EU_PHS_review.xlsx"
phs_geth_all = pd.read_excel(phs_geth_fn, sheet_name='overall', index_col=0).dropna(subset=['Estor [GWh]'])
# Iterate through all PHS plants
for idx in phs_capacity_df.index:
# Retrieve ISO2 country code for index checks
# code = convert_country_codes([phs_capacity_df.loc[idx, 'Country']], 'name', 'alpha_2', True)[0]
iso_code = phs_capacity_df.loc[idx, 'ISO2']
if iso_code in phs_geth_all.index:
# Compute country-specific PHS duration, based on Geth
default_duration = phs_geth_all.loc[iso_code, 'Estor [GWh]'] / phs_geth_all.loc[iso_code, 'Pd,nom [GW]']
else:
# If country data is missing, impose default value
default_duration = default_phs_duration
# If ISO2 in file sheets (detailed country data exists), read file...
try:
phs_geth = pd.read_excel(phs_geth_fn, sheet_name=iso_code, index_col='JRC_HPDB_id')
if idx in phs_geth.index and not np.isnan(phs_geth.loc[idx, 'Estor [GWh]']):
# If storage content is provided, fetch it directly...
phs_capacity_df.loc[idx, 'Energy'] = phs_geth.loc[idx, 'Estor [GWh]'] * 1e3
else:
# ... otherwise, consider default duration.
phs_capacity_df.loc[idx, 'Energy'] = phs_capacity_df.loc[idx, 'Capacity'] * default_duration
# ...else, impose default duration.
except xlrd.biffh.XLRDError:
phs_capacity_df.loc[idx, 'Energy'] = phs_capacity_df.loc[idx, 'Capacity'] * default_duration
return phs_capacity_df[['Name', 'Capacity', 'Energy', 'region_code']]
def build_phs_data(phs_plants_df: pd.DataFrame, default_phs_duration: float) -> pd.DataFrame:
"""
Compute total PHS power (GW) and energy (GWh) capacities for a series of regions.
Parameters
----------
phs_plants_df: pd.DataFrame
Frame containing PHS power plant data.
default_phs_duration: float
Default duration for PHS plants.
Returns
-------
php_capacity_df: pd.DataFrame
Dataframe containing PHS power (GW) and energy (GWh) capacity
"""
phs_storage_df = get_phs_storage_capacities(phs_plants_df, default_phs_duration)
phs_capacity_df = phs_storage_df.groupby(phs_storage_df['region_code']).sum() * 1e-3
return phs_capacity_df
def compute_ror_series(runoff_dataset: xr.Dataset, region_points: List[Tuple[float, float]],
flood_event_threshold: float) -> pd.DataFrame:
"""
Computing ROR p.u. time series as directly proportional to runoff for a given grid cell/area.
Parameters
----------
runoff_dataset: xarray.Dataset
Contains runoff data, in this case expressed in m.
region_points: List[Tuple[float, float]]
List of points (lon, lat) within a region.
flood_event_threshold: float
        Quantile used to clip the runoff time series (stems from the assumption that
        ROR plants are designed for, e.g., a p80 flow).
Returns
-------
ts_norm: pd.DataFrame
Time series of p.u. capacity factors for ROR plants.
"""
# Mean of runoffs over all points within the region.
ts = runoff_dataset.ro.sel(locations=region_points).mean(dim='locations').load()
# Compute quantile from xarray object.
q = ts.quantile(q=flood_event_threshold)
# Clipping according to the flood_event_threshold
ts[ts > q] = q
# Normalizing for p.u. representation.
return ts / ts.max()
def build_ror_data(ror_capacity_ds: pd.Series, timestamps: pd.DatetimeIndex,
runoff_dataset: xr.Dataset, runoff_points_region_ds: pd.Series) -> Tuple[pd.Series, pd.DataFrame]:
"""
Compute total ROR capacities (in GW) and inflow (p.u. of capacity) for a series of regions.
Parameters
----------
ror_capacity_ds: pd.Series
Series containing ROR power (GW) capacity per plant, indexed by the region in which the plant is located.
timestamps: pd.DatetimeIndex
Time stamps over which the inflows must be computed.
runoff_dataset: xr.Dataset
ERA5 runoff dataset
runoff_points_region_ds: pd.Series
Indicates in which region each ERA5 point falls.
Returns
-------
ror_capacity_ds: pd.Series
Series containing ROR power (GW) capacity per region.
ror_inflows_df: pd.DataFrame
ROR inflow time-series (p.u. of power capacity) for each region.
"""
ror_thresholds_fn = f"{data_path}generation/hydro/source/ror_flood_event_thresholds.csv"
ror_thresholds = pd.read_csv(ror_thresholds_fn, index_col=0)
ror_capacity_ds = ror_capacity_ds.groupby(ror_capacity_ds.index).sum() * 1e-3
ror_inflows_df = | pd.DataFrame(index=timestamps, columns=ror_capacity_ds.index) | pandas.DataFrame |
"""
Functions specific to preprocess raw extract data from HMIS.
The raw data is provided in the following format:
(king data is divided by year;
for pierce & snohomish all years are in one folder)
data/*county*/*year*/Affiliation.csv
Client.csv
Disabilities.csv
EmploymentEducation.csv
Enrollment_families.csv
Enrollment.csv
EnrollmentCoC.csv
Exit.csv
Export.csv
Funder.csv
HealthAndDV.csv
IncomeBenefits.csv
Inventory.csv
Organization.csv
Project.csv
ProjectCoC.csv
Services.csv
Site.csv
"""
import pandas as pd
import datetime
import os.path as op
import numpy as np
import json
import puget.utils as pu
import warnings
from puget.data import DATA_PATH
# Paths of csvs
COUNTY_FOLDERS = {'king': [str(s) for s in range(2012, 2017)],
'pierce': ['2012_2016'], 'snohomish': ['2012_2016']}
# these values translate to unknown data for various reasons. Treat as NANs
CATEGORICAL_UNKNOWN = [8, 9, 99]
# entry/exit suffixes for columns
ENTRY_EXIT_SUFFIX = ['_entry', '_exit', '_update']
# Names that should be excluded:
NAME_EXCLUSION = ["consent", "refused", "anonymous", "client",
"refsued", "noname", "unknown"]
# dict of default metadata file names
METADATA_FILES = {'enrollment': 'enrollment.json',
'exit': 'exit.json',
'client': 'client.json',
'disabilities': 'disabilities.json',
'employment_education': 'employment_education.json',
'health_dv': 'health_dv.json',
'income': 'income.json',
'project': 'project.json'}
for k, v in METADATA_FILES.items():
METADATA_FILES[k] = op.join(DATA_PATH, 'metadata', v)
file_path_boilerplate = (
"""
file_spec : dict or string
if a dict, keys should be paths, values should be full path to files
if a string, should be the filename of the .csv table and data_dir &
paths parameters are required
county: string
name of county to get data for. Must be a key in COUNTY_FOLDERS and
have a folder of the same name in the data folder. Not required
if file_spec is a dictionary
data_dir : string
full path to general data folder (usually puget/data/*county*);
not required if file_spec is a dictionary
paths : list
list of directories inside data_dir to look for csv files in;
not required if file_spec is a dictionary
""")
metdata_boilerplate = (
"""
metadata_file : string
name of json metadata file with lists of columns to use for
deduplication, columns to drop, categorical and time-like columns
""")
def std_path_setup(filename, data_dir, paths):
"""
Setup filenames for read_table assuming standard data directory structure.
Parameters
----------
filename : string
This should be the filename of the .csv table
data_dir : string
full path to general data folder (usually puget/data/*county*)
paths : list
list of directories inside data_dir to look for csv files in
Returns
----------
dict with key of paths, value of filenames for all included folders
"""
file_list = []
for p in paths:
file_list.append(op.join(data_dir, p, filename))
file_spec = dict(zip(paths, file_list))
return file_spec
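# Small illustration of std_path_setup (not executed at import time): the county
# folder and years below are made up; the point is only the shape of the returned
# mapping from sub-folder to full csv path (paths shown as they appear on a POSIX
# system).
def _example_std_path_setup():
    # returns {'2012': 'puget/data/king/2012/Enrollment.csv',
    #          '2013': 'puget/data/king/2013/Enrollment.csv'}
    return std_path_setup('Enrollment.csv', 'puget/data/king', ['2012', '2013'])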
def read_table(file_spec, county=None, data_dir=None, paths=None,
columns_to_drop=None, categorical_var=None,
categorical_unknown=CATEGORICAL_UNKNOWN,
time_var=None, duplicate_check_columns=None, dedup=True,
encoding=None, name_columns=None):
"""
Read in any .csv table from multiple folders in the raw data.
Parameters
----------
%s
columns_to_drop : list
A list of of columns to drop. The default is None.
categorical_var : list
A list of categorical (including binary) variables where values
listed in categorical_unknown should be recorded as NaNs
categorical_unknown: list
values that should be recorded as NaNs for categorical variables
typically: 8, 9, 99
time_var : list
A list of time (variables) in yyyy-mm-dd format that are
reformatted into pandas timestamps. Default is None.
duplicate_check_columns : list
list of columns to conside in deduplication.
Generally, duplicate rows may happen when the same record is
registered across the .csv files for each folder.
dedup: boolean
flag to turn on/off deduplication. Defaults to True
Returns
----------
dataframe of a csv tables from all included folders
"""
if columns_to_drop is None:
columns_to_drop = []
if categorical_var is None:
categorical_var = []
if time_var is None:
time_var = []
if not isinstance(file_spec, dict):
if data_dir is None:
if county is None:
raise ValueError('If file_spec is a string, data_dir or ' +
'county must be passed')
else:
if not isinstance(county, str):
raise ValueError('county must be a string -- '
'one county at a time, please!')
data_dir = op.join(DATA_PATH, county)
if paths is None:
if county is None:
raise ValueError('If file_spec is a string, paths or county ' +
'must be passed')
else:
if not isinstance(county, str):
raise ValueError('county must be a string -- '
'one county at a time, please!')
paths = COUNTY_FOLDERS[county]
file_spec = std_path_setup(file_spec, data_dir, paths)
else:
if data_dir is not None or paths is not None:
raise ValueError(
'If file_spec is a dict, data_dir and paths cannot be passed')
file_spec_use = file_spec.copy()
# Start by reading the first file into a DataFrame
path, fname = file_spec_use.popitem()
df = pd.read_csv(fname, low_memory=False, encoding=encoding)
# Then, for the rest of the files, append to the DataFrame.
for path, fname in file_spec_use.items():
this_df = | pd.read_csv(fname, low_memory=False, encoding=encoding) | pandas.read_csv |
from __future__ import annotations
from contextlib import contextmanager
import os
from pathlib import Path
import random
from shutil import rmtree
import string
import tempfile
from typing import (
IO,
Any,
)
import numpy as np
from pandas import set_option
from pandas.io.common import get_handle
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
with get_handle(path, "rb", compression=compression, is_text=False) as handle:
yield handle.handle
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
@contextmanager
def ensure_clean(filename=None, return_filelike: bool = False, **kwargs: Any):
"""
Gets a temporary path and agrees to remove on close.
This implementation does not use tempfile.mkstemp to avoid having a file handle.
If the code using the returned path wants to delete the file itself, windows
requires that no program has a file handle to it.
Parameters
----------
filename : str (optional)
suffix of the created file.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords are passed to open().
"""
folder = Path(tempfile.gettempdir())
if filename is None:
filename = ""
filename = (
"".join(random.choices(string.ascii_letters + string.digits, k=30)) + filename
)
path = folder / filename
path.touch()
handle_or_str: str | IO = str(path)
if return_filelike:
kwargs.setdefault("mode", "w+b")
handle_or_str = open(path, **kwargs)
try:
yield handle_or_str
finally:
if not isinstance(handle_or_str, str):
handle_or_str.close()
if path.is_file():
path.unlink()
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
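# Minimal usage sketch (not executed at import time): any files written inside the
# yielded directory are removed together with the directory when the block exits.
def _example_ensure_clean_dir():
    with ensure_clean_dir() as dir_name:
        with open(os.path.join(dir_name, "tmp.txt"), "w") as handle:
            handle.write("scratch output")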
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
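# Minimal usage sketch (not executed at import time): temporarily register a dialect
# and hand its name to read_csv, so "1|2" style rows are split on the pipe character.
def _example_with_csv_dialect():
    from io import StringIO
    import pandas as pd
    with with_csv_dialect("piped", delimiter="|"):
        return pd.read_csv(StringIO("a|b\n1|2"), dialect="piped")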
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
| set_option("compute.use_numexpr", use) | pandas.set_option |
from collections import defaultdict
import pandas as pd
import re
converters = {}
class AnnotationConverter:
SPEAKER_ID_TO_TYPE = defaultdict(
lambda: "NA",
{
"C1": "OCH",
"C2": "OCH",
"CHI": "CHI",
"CHI*": "CHI",
"FA0": "FEM",
"FA1": "FEM",
"FA2": "FEM",
"FA3": "FEM",
"FA4": "FEM",
"FA5": "FEM",
"FA6": "FEM",
"FA7": "FEM",
"FA8": "FEM",
"FC1": "OCH",
"FC2": "OCH",
"FC3": "OCH",
"MA0": "MAL",
"MA1": "MAL",
"MA2": "MAL",
"MA3": "MAL",
"MA4": "MAL",
"MA5": "MAL",
"MC1": "OCH",
"MC2": "OCH",
"MC3": "OCH",
"MC4": "OCH",
"MC5": "OCH",
"MI1": "OCH",
"MOT*": "FEM",
"OC0": "OCH",
"UC1": "OCH",
"UC2": "OCH",
"UC3": "OCH",
"UC4": "OCH",
"UC5": "OCH",
"UC6": "OCH",
"UA1": "NA",
"UA2": "NA",
"UA3": "NA",
"UA4": "NA",
"UA5": "NA",
"UA6": "NA",
"EE1": "NA",
"EE2": "NA",
"FAE": "NA",
"MAE": "NA",
"FCE": "NA",
"MCE": "NA",
},
)
THREAD_SAFE = True
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
converters[cls.FORMAT] = cls
class CsvConverter(AnnotationConverter):
FORMAT = "csv"
@staticmethod
def convert(filename: str, filter="") -> pd.DataFrame:
return | pd.read_csv(filename) | pandas.read_csv |
from tinydb import TinyDB, Query
from tinydb.storages import JSONStorage
from tinydb.middlewares import CachingMiddleware
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# https://pypi.org/project/tinydb/
dbECC = TinyDB('../../db/serverdbECC.json',
indent=4, separators=(',', ': '),
default_table="device_info",
# storage=CachingMiddleware(JSONStorage)
)
dbECCData = TinyDB('../../db/serverdbECC.json',
indent=4, separators=(',', ': '),
default_table="data",
# storage=CachingMiddleware(JSONStorage)
)
dbRSA = TinyDB('../../db/serverdbRSA.json',
indent=4, separators=(',', ': '),
default_table="device_pub_priv",
# storage=CachingMiddleware(JSONStorage)
)
dbRSATime = TinyDB('../../db/serverdbRSA.json',
indent=4, separators=(',', ': '),
default_table="timing",
# storage=CachingMiddleware(JSONStorage)
)
def visualize_ecc():
data_info = []
for row in dbECC:
data_info.append(row)
kdf = | pd.DataFrame(data_info) | pandas.DataFrame |
import anndata as ad
import pandas as pd
def load_metadata(adata, metadata_file, path='', separator=';', remove_index_str = None):
"""
Load observational metadata in adata.obs.
Input metadata file as csv/txt and the adata object to annotate.
first raw of the metadata file is considered as a header
first column contain the cell name
    Parameters
    ----------
    adata: initial AnnData object
    metadata_file: csv file containing the cell names in the first column and any
        kind of metadata to load in the remaining columns
    path: path to the metadata file
    separator: ';' or "\t", character used to split the columns
    remove_index_str: a list of strings to be removed from the index of the AnnData object.
Default value is None. For example, if the index is ['/path/to/file1.txt','/path/to/file2.txt']
and remove_index_str = ['/path/to/','.txt'], then the index of AnnData object
will be changed to ['file1','file2']
Return
------
Annotated AnnData
"""
# dict_annot = {}
# with open(path+metadata_file) as f:
# head = f.readline().strip().split(separator)
# file = f.readlines()
# for key in head:
# dict_annot[key] = []
# data = [line.strip().split(separator) for line in file]
# for name in adata.obs_names:
# # this line is not always true. It depends on how the format of the data are
# name = name.split('.')[0]
# found = False
# for line in data:
# if name == line[0]:
# i = 0
# for key in head:
# dict_annot[key].append(line[i])
# i += 1
# found = True
# continue
# # if we could not find annotations
# if found == False:
# for key in head:
# dict_annot[key].append('NA')
# for key in head:
# adata.obs[key] = dict_annot[key]
    metadata = pd.read_csv(path + metadata_file, sep=separator, header=0)
str_index = adata.obs.index
if remove_index_str:
for value in remove_index_str:
str_index = str_index.str.replace(value,'',regex=False)
df = | pd.DataFrame('NA', index=str_index, columns=metadata.columns) | pandas.DataFrame |
import numpy as np
import pandas as pd
def get_eval_df(sequencer):
ids = [ss.identifier for ss in sequencer.get_pairs()]
classes = ["mean"] + ["cls {}".format(i) for i in range(sequencer.n_classes)]
return | pd.DataFrame(columns=ids, index=classes) | pandas.DataFrame |
import os
import pickle
import librosa
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings('ignore')
from scipy.stats import skew, kurtosis
from pychorus import find_and_output_chorus
from flask import Flask, request, json, render_template
# Create flask app
app = Flask(__name__)
# Load pkl model
model = pickle.load(open('Your model name here', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods = ['POST'])
def predict():
song_link = list(request.form.values())[0]
# get features from songs
data = []
d, cols = extract_features(song_link)
data.append(d)
dataset = pd.DataFrame(data, columns=cols)
# select features which we used in ml model
df = | pd.read_csv('Data/bestfeatures.csv') | pandas.read_csv |
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {
"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1], "args": [1]},
"count": {"expected": 2},
}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(
index=pd.MultiIndex.from_product(
[["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
),
columns=Index(["1", "2"], name="id"),
)
df["1"] = [
np.nan,
1,
np.nan,
np.nan,
11,
np.nan,
np.nan,
2,
np.nan,
np.nan,
22,
np.nan,
]
df["2"] = [
np.nan,
3,
np.nan,
np.nan,
33,
np.nan,
np.nan,
4,
np.nan,
np.nan,
44,
np.nan,
]
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
actual = df.groupby("key")["value"].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
def scipy_sem(*args, **kwargs):
from scipy.stats import sem
return sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby("Date")
r = gb[["File"]].max()
e = gb["File"].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r["File"].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series(
[7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[3, 2, 1, 3, 3, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
)
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series(
[1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[0, 1, 1, 0, 1, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
)
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = | pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]}) | pandas.DataFrame |
import unittest
import pandas as pd
from chemcharts.core.container.chemdata import ChemData
from chemcharts.core.container.fingerprint import *
from chemcharts.core.functions.binning import Binning
class TestBinning(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
smiles = Smiles(["COc1ccc(-c2c(-c3ccc(S(N)(=O)=O)cc3)[nH]c3ccccc23)cc1",
"COc1ccc(-c2c(-c3ccc(S(N)(=O)=O)cc3)oc3ccccc23)cc1F",
"Cc1cc(C)c(S(=O)(=O)N2CCN(C(C)c3nc(C(C)(C)C)no3)CC2)c(C)c1",
"C1ccc2c(c1)-c1ccc3ccccc3c1C2Cc1nn[nH]n1",
"Cc1ccccc1-c1c(C(=O)N=c2cccc[nH]2)cnc2ccccc12",
"N=c1[nH]c(=O)c2ncn(Cc3cccc4ccccc34)c2[nH]1",
"O=C1c2cccc3c(F)ccc(c23)CN1c1cccnc1"])
values = | pd.DataFrame([1, 3, 4, 5, 2, 1, 6], columns=["test_value"]) | pandas.DataFrame |
"""
Module full of various helpers for creating matplotlib animations.
"""
import numpy as np
import pandas as pd
import pytweening
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# In order to get the final points, there's 2 high-level steps:
#
# 1. Generate points between x1 and x2 that are spaced evenly
# 2. Use pytweening to transform the generated points such that they match the
# supplied timing function.
#
# To make step 2 happen, we have to go about it in complicated way (there might
# be a better, more clever way to do this).
#
# The ``pytweening`` module is nice, but only works on values between 0 and 1.
# We'll transform the generated evenly spaced points such that they have a min
# of 0 and max of 1, then apply the pytweening function, then transform the new
# points back to their original scale.
# We'll use sklearn's MinMaxScaler for this process.
def tween(x1, x2, fn, n_points=50):
"""
Generate intermediate points between x1 and x2 based on a specified tweening
function.
    x1 and x2 may be either single scalar values or 1-d numpy arrays of the same
    shape.
Parameters
----------
    - x1: a scalar value or a 1-d array (numpy array or pandas series)
    - x2: a scalar value or a 1-d array (numpy array or pandas series)
- fn: a timing function from pytweening
- n_points: the number of points to generate, including the starting and
stopping points.
Returns
-------
    If x1 and x2 are scalars, a 1-d array with n_points elements where the first
element is x1 and the last is x2.
If x1 and x2 are 1-d arrays, a matrix of shape (n_points, x1.size). Each row
in the matrix is the data points at one step. The first row is x1 and the
last row is x2.
Examples
--------
>>> import pytweening
>>> tween(1, 10, pytweening.linear, 10)
array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
>>> tween(1, 10, pytweening.easeInQuad, 10)
array([ 1. , 1.11111111, 1.44444444, 2. , 2.77777778,
3.77777778, 5. , 6.44444444, 8.11111111, 10. ])
>>> tween(1, 10, pytweening.easeOutQuad, 10)
array([ 1. , 2.88888889, 4.55555556, 6. , 7.22222222,
8.22222222, 9. , 9.55555556, 9.88888889, 10. ])
>>> x1 = np.array([1, 1, 2])
>>> x2 = np.array([2, 5, 4])
>>> tween(x1, x2, pytweening.linear, 5)
array([[1. , 1. , 2. ],
[1.25, 2. , 2.5 ],
[1.5 , 3. , 3. ],
[1.75, 4. , 3.5 ],
[2. , 5. , 4. ]])
"""
# handle the case where we have scalar values first
if np.isscalar(x1) and np.isscalar(x2):
if x1 == x2:
return np.repeat(x1, n_points)
xx = np.linspace(x1, x2, n_points).reshape(-1, 1)
scaler = MinMaxScaler().fit(xx)
linspace = (
np.linspace(0, 1, n_points) if x1 < x2 else np.linspace(1, 0, n_points)
)
return scaler.inverse_transform(
np.array([fn(x) for x in linspace]).reshape(-1, 1)
).ravel()
# sanity check arguments
if len(x1.shape) != 1 or len(x2.shape) != 1 or x1.shape[0] != x2.shape[0]:
raise ValueError(
"x1 and x2 must be either scaler values or 1-d numpy arrays of the same shape"
)
xx_linear = np.linspace(x1, x2, n_points)
scaler = MinMaxScaler().fit(xx_linear)
xx_minmax = scaler.transform(xx_linear)
# because rounding, sometimes we end up w/ numbers like 1.0000000002
xx_minmax = np.where(xx_minmax > 1, 1, xx_minmax)
xx_minmax_t = pd.DataFrame(xx_minmax)
import argparse
import os
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm as tqdm_notebook
from datasets import DataManager
from utils import *
from models import get_model
seed_everything(43)
ap = argparse.ArgumentParser(description='pretraining')
ap.add_argument('dataset', choices=['c10', 'c100', 'tin','svhn'], type=str, help='Dataset choice')
ap.add_argument('model', type=str, help='Model choice')
ap.add_argument('--test_only', '-t', type=bool, default=False, help='test the best model')
ap.add_argument('--valid_size', '-v', type=float, default=0.1, help='valid_size')
ap.add_argument('--batch_size', default=128, type=int, help='Batch Size')
ap.add_argument('--lr', default=0.05, type=float, help='Learning rate')
ap.add_argument('--scheduler_type', '-st', type=int, choices=[1, 2], default=1, help='lr scheduler type')
ap.add_argument('--decay', '-d', type=float, default=0.001, help='weight decay')
ap.add_argument('--epochs', default=200, type=int, help='Epochs')
ap.add_argument('--workers', default=0, type=int, help='number of workers')
ap.add_argument('--cuda_id', '-id', type=str, default='0', help='gpu number')
args = ap.parse_args()
############################### preparing dataset ################################
data_object = DataManager(args)
trainloader, valloader, testloader = data_object.prepare_data()
dataloaders = {
'train': trainloader, 'val': valloader, "test": testloader
}
############################### preparing model ###################################
model = get_model(args.model, 'full', data_object.num_classes, data_object.insize)
############################## preparing for training #############################
if os.path.exists('logs') == False:
os.mkdir("logs")
if os.path.exists('checkpoints') == False:
os.mkdir("checkpoints")
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.decay)
device = torch.device(f"cuda:{str(args.cuda_id)}")
model.to(device)
def train(model, loss_fn, optimizer, scheduler=None):
model.train()
counter = 0
tk1 = tqdm_notebook(dataloaders['train'], total=len(dataloaders['train']))
running_loss = 0
for x_var, y_var in tk1:
counter +=1
x_var = x_var.to(device=device)
y_var = y_var.to(device=device)
scores = model(x_var)
loss = loss_fn(scores, y_var)
running_loss+=loss.item()
tk1.set_postfix(loss=running_loss/counter)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return running_loss/counter
def test(model, loss_fn, optimizer, phase, scheduler=None):
model.eval()
counter = 0
tk1 = tqdm_notebook(dataloaders[phase], total=len(dataloaders[phase]))
running_loss = 0
running_acc = 0
total = 0
with torch.no_grad():
for x_var, y_var in tk1:
counter +=1
x_var = x_var.to(device=device)
y_var = y_var.to(device=device)
scores = model(x_var)
loss = loss_fn(scores, y_var)
_, scores = torch.max(scores.data, 1)
y_var = y_var.cpu().detach().numpy()
scores = scores.cpu().detach().numpy()
correct = (scores == y_var).sum().item()
running_loss+=loss.item()
running_acc+=correct
total+=scores.shape[0]
tk1.set_postfix(loss=running_loss/counter, acc=running_acc/total)
return running_acc/total, running_loss/counter
###################################### training starts here ############################
best_acc = 0
num_epochs = args.epochs
train_losses = []
valid_losses = []
valid_accuracy = []
if args.test_only == False:
for epoch in range(num_epochs):
adjust_learning_rate(optimizer, epoch, args)
print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
t_loss = train(model, criterion, optimizer)
acc, v_loss = test(model, criterion, optimizer, "val")
if acc>best_acc:
print("**Saving model**")
best_acc=acc
torch.save({
"epoch": epoch + 1,
"state_dict" : model.state_dict(),
"acc" : best_acc,
}, f"checkpoints/{args.model}_{args.dataset}_pretrained.pth")
train_losses.append(t_loss)
valid_losses.append(v_loss)
valid_accuracy.append(acc)
df_data=np.array([train_losses, valid_losses, valid_accuracy]).T
df = pd.DataFrame(df_data, columns = ['train_losses','valid_losses','valid_accuracy'])
import pandas as pd
import functools as ft
import numpy as np
import sys
# driver script for applying the refactoring transformation
class Stmt:
def __init__(self, start_line, start_char, end_line, end_char):
self.start_line = start_line
self.start_char = start_char
self.end_line = end_line
self.end_char = end_char
def __str__(self):
return("[" + str(self.start_line) + ", " + str(self.start_char) + "; " + str(self.end_line) + ", " + str(self.end_char) + "]")
def __eq__(self, other):
return( self.start_line == other.start_line and self.end_line == other.end_line and self.start_char == other.start_char and self.end_char == other.end_char)
def __lt__(self, other):
return( self.start_line < other.start_line or (self.start_line == other.start_line and self.start_char < other.start_char))
def __le__(self, other):
return( self < other or self == other)
def __hash__(self):
return id(self)
def subsumes(self, other): # returns true if self subsumes the other_stmt
left_sub = self.start_line < other.start_line or (self.start_line == other.start_line and self.start_char <= other.start_char)
right_sub = self.end_line > other.end_line or (self.end_line == other.end_line and self.end_char >= other.end_char)
return( left_sub and right_sub and not self == other)
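# Illustrative subsumption check (positions invented for exposition):
#   outer = Stmt(3, 0, 10, 5)    # spans lines 3-10
#   inner = Stmt(4, 2, 6, 1)     # lies strictly inside outer
#   outer.subsumes(inner)   -> True
#   inner.subsumes(outer)   -> False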
class ParseTreeNode:
def __init__(self, child_list, stmt):
self.stmt = stmt
self.child_list = child_list
self.is_leaf = False
self.text = [[]]
self.parent = None
if len(child_list) == 0:
self.is_leaf = True
for c in child_list:
c.parent = self
def __set_self_text(self, file_contents):
if self.is_leaf:
self.text = [get_stmt( self.stmt, file_contents)] # return array of strings representing the lines of the statement
return
# if we're here it means there is at least one child
current_child = self.child_list[0]
# get the text from the beginning of the stmt until the beginning of the first child node
subs = 0 if current_child.stmt.start_line == self.stmt.start_line else 1
self.text = [get_stmt( Stmt(self.stmt.start_line, self.stmt.start_char, current_child.stmt.start_line, current_child.stmt.start_char - subs), file_contents)]
for ind in range(1, len(self.child_list)):
next_child = self.child_list[ind]
# print(Stmt(current_child.stmt.end_line, current_child.stmt.end_char + 1, next_child.stmt.start_line, next_child.stmt.start_char))
self.text += [get_stmt( Stmt(current_child.stmt.end_line, current_child.stmt.end_char + 1, next_child.stmt.start_line, next_child.stmt.start_char - 1), file_contents)]
current_child = next_child
adds = 0 if len(self.child_list) == 1 and self.stmt.start_line == self.stmt.end_line and self.stmt.end_char > self.child_list[0].stmt.end_char else 1
self.text += [get_stmt( Stmt(current_child.stmt.end_line, current_child.stmt.end_char + adds, self.stmt.end_line, self.stmt.end_char), file_contents)]
def set_text(self, file_contents):
self.__set_self_text(file_contents)
for c in self.child_list:
c.set_text(file_contents)
def get_text(self):
to_ret = self.text[0].copy()
for ind in range(1, len(self.text)):
to_ret += self.child_list[ind - 1].get_text()
to_ret += self.text[ind].copy()
return( to_ret)
def print(self):
print_array_newline_sep( self.get_text())
def __hash__(self):
return id(self)
def count_tree(rnode):
count = 1
for c in rnode.child_list:
count += count_tree(c)
return count
# text is an array of arrays of strings (the per-node text segments).
# We want the (outer, inner) index of the segment that contains "await ";
# there should be exactly one (if more than one somehow appears, we just take
# the first, or the last when reordering a statement forward).
def get_index_with_await( text, swapping_last = False):
indices = [(k, i) for k in range(len(text)) for i in range( 0, len(text[k])) if text[k][i].count("await ") > 0]
if len(indices) > 1 or len(indices) == 0:
print("WHAT IS GOING ON: " + str(text))
return( (-1, -1))
if swapping_last:
return( indices[-1]) # if we're reordering forward, get the last await
return( indices[0]) # otherwise, get the first await
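# Hypothetical example of the (outer, inner) index returned above (the text
# layout is invented; real layouts are built by ParseTreeNode.set_text):
#   text = [["x = ", "await fetchThing()", ";"]]
#   get_index_with_await(text)   -> (0, 1)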
def corresp_paren(p):
return {
'(' : ')',
')' : '(',
'[' : ']',
']' : '[',
'{' : '}',
'}' : '{'
}.get(p, p) # just return p itself as a default if it's not in the dict
def build_paren_stack( a_string):
paren_stack = ""
for c in a_string:
if c == "(" or c == "{" or c == "[":
paren_stack += c
elif c == ")" or c == "}" or c == "]":
if len(paren_stack) > 0 and paren_stack[-1] == corresp_paren(c):
paren_stack = paren_stack[ : -1]
else:
paren_stack += c
return( paren_stack)
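# Illustrative behaviour (inputs invented for exposition):
#   build_paren_stack("foo(bar[1])")   -> ""      balanced
#   build_paren_stack("foo(bar[1]")    -> "("     unmatched opener left over
#   build_paren_stack("baz) + (qux")   -> ")("    unmatched closer, then opener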
# we need to match the parens before and after the awaits, in case the split point falls inside a parenthesized expression
# note: no need to look through the text of the child_list, since these are self-contained
# statements and so won't include closing/opening parens in part of the enclosing stmt, which
# is the statement we're parsing
def get_compensating_parens( text, text_ind, ind_to_split_at):
if ind_to_split_at == -1: # then there was an error
return( -1, -1)
start_text = text[text_ind][0 : ind_to_split_at]
end_text = text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = text[text_ind][ind_to_split_at]
front_paren_stack = build_paren_stack( ''.join(start_text) + split_text[0: split_text.index('await')])
end_paren_stack = build_paren_stack( split_text[ split_text.index('await') + len('await') : ] + ''.join(end_text))
if build_paren_stack(front_paren_stack + end_paren_stack) != "":
#raise ValueError("Mismatched parens in: " + text[text_ind][ind_to_split_at])
return( -1, -1)
return( front_paren_stack, end_paren_stack)
# this is like move_stmt, but instead of just shifting the statement, we're actually going
# to split it into an initial promise creation:
# var temp = < portion of the statement after the await >
# this goes where the statement would be moved to
# < portion of the statement before the await > = await temp
# this should just involve changing the text() of the moved node and the placeholder node
# no scoping issues should ensue, since we're moving the whole statement
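# Sketch of the intended source-level rewrite, with JavaScript shown in comments
# (the statement text and the TEMP name are illustrative, not from real output):
#
#   // before                          // after
#   doOtherWork();                     var TEMP = fetchData();   // moved earlier
#   const x = await fetchData();       doOtherWork();
#                                      const x = await TEMP;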
def move_and_split_await( root_node, root_map, stmt_to_move, stmt_to_move_before, temp_var_name, add_timing = False, filename = ""):
node_to_move = root_map[stmt_to_move]
node_to_move_before = root_map[stmt_to_move_before]
# updates required:
# remove node_to_move from its parent's child list
# then, add it before stmt_to_move_before
# NEW ITEM: update the text (using temp_var_name as specified)
# when we remove node_to_move from the child list:
# -- just replace it with a new, blank node but with the old stmt as the stmt
# -- then, it's not blank any more: the text is the corresponding "everything before the await" = await temp_var_name
# -- and, when we move the node, we're actually replacing the text to be var temp_var_name = "everything after the await"
# -- also, split the child node list
# -- and, replace it in the node_map
old_pos_node = ParseTreeNode( [], stmt_to_move)
# now, compute the index to split at
(text_ind, ind_to_split_at) = get_index_with_await(node_to_move.text)
#pre-compute paren stacks so we can catch all errors at once
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_move.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically swap the following statement: ")
node_to_move.print()
print("With the following statement: ")
node_to_move_before.print()
print("DONE REPORTING PROBLEMS")
# return( root_node, root_map, 0)
return(0)
# split both the text array and the child_list
start_text = node_to_move.text[text_ind][0 : ind_to_split_at]
end_text = node_to_move.text[text_ind][ ind_to_split_at + 1 : ]
start_child_list = node_to_move.child_list[0 : ind_to_split_at]
end_child_list = node_to_move.child_list[ ind_to_split_at :]
# get the text we're going to split:
split_text = node_to_move.text[text_ind][ind_to_split_at]
string_before_await = split_text[0: split_text.index('await')]
string_after_await = split_text[ split_text.index('await') + len('await') : ]
if build_paren_stack(string_after_await) != "":
return( 0)
# now, add the new updates to the strings
# don't forget the parens
string_before_await = string_before_await + " await " + temp_var_name + end_paren_stack
string_after_await = "var " + temp_var_name + " = " + front_paren_stack + string_after_await
# and, set up the nodes
# starting off the same as before
if node_to_move.parent != None:
child_list_to_rem_from = node_to_move.parent.child_list
child_list_to_rem_from[child_list_to_rem_from.index(node_to_move)] = old_pos_node
root_map[stmt_to_move] = old_pos_node
if node_to_move_before.parent != None:
child_list_to_add_to = node_to_move_before.parent.child_list
child_list_to_add_to.insert( child_list_to_add_to.index(node_to_move_before), node_to_move)
node_to_move_before.parent.text.insert( child_list_to_add_to.index(node_to_move_before), [""])
node_to_move.parent = node_to_move_before.parent
# now, update the text and child_lists in node_to_move and old_pos_node:
# node_to_move gets the string after await (i.e. the promise creation we want to move earlier)
# and, gets the end_child_list (for the same reason)
bad_paren_start = ""
bad_parens = False
if add_timing:
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(old_pos_node.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
new_await_timing_var = "await " + temp_var_name + end_paren_stack + ";\n "
string_before_await = split_text[0: split_text.index('await')]
# can't add timing if the await is in the middle of a statement, for example like multiassignment in kactus's utils.ts
# so in this case just put the timing before the entire statement
if build_paren_stack(string_before_await) != "":
bad_parens = True
if len( str.strip(string_before_await)) > 0:
# only create this variable if we actually need it (just having it hanging out alone at the end of string_before_await is an error, but then it's also an error if created but never used)
string_before_await += " AWAIT_VAR_TIMING_" + temp_var_name
new_await_timing_var = "var AWAIT_VAR_TIMING_" + temp_var_name + " = " + new_await_timing_var
if not bad_parens:
string_before_await = timing_pre_text + new_await_timing_var + timing_post_text + string_before_await
else:
bad_paren_start = timing_pre_text + new_await_timing_var + timing_post_text
node_to_move.text = [[string_after_await] + end_text]
node_to_move.child_list = end_child_list
old_pos_node.text = [merge_into_first_string_of_list( start_text, bad_paren_start) + [string_before_await]]
old_pos_node.child_list = start_child_list
# return( root_node, root_map)
return(1)
# this relies on the earlier swaps already being done
def move_await_later( root_node, root_map, stmt_to_move, stmt_to_move_after, temp_var_name, add_timing = False, filename = ""):
swapping_last = True
node_to_move = root_map[stmt_to_move]
node_to_move_after = root_map[stmt_to_move_after]
# updates required:
# remove node_to_move from its parent's child list
# then, add it before stmt_to_move_before
# NEW ITEM: update the text (using temp_var_name as specified)
# when we remove node_to_move from the child list:
# -- just replace it with a new, blank node but with the old stmt as the stmt
# -- then, it's not blank any more: the text is the corresponding "everything before the await" = await temp_var_name
# -- and, when we move the node, we're actually replacing the text to be var temp_var_name = "everything after the await"
# -- also, split the child node list
# -- and, replace it in the node_map
old_pos_node = ParseTreeNode( [], stmt_to_move)
# now, compute the index to split at
(text_ind, ind_to_split_at) = get_index_with_await(node_to_move.text, swapping_last)
#pre-compute paren stacks so we can catch all errors at once
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_move.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically swap the following statement: ")
node_to_move.print()
print("With the following statement: ")
node_to_move_after.print()
print("DONE REPORTING PROBLEMS")
# return( root_node, root_map, 0)
return(0)
# split both the text array and the child_list
start_text = node_to_move.text[text_ind][0 : ind_to_split_at]
end_text = node_to_move.text[text_ind][ ind_to_split_at + 1 : ]
start_child_list = node_to_move.child_list[0 : ind_to_split_at]
end_child_list = node_to_move.child_list[ ind_to_split_at :]
# get the text we're going to split:
split_text = node_to_move.text[text_ind][ind_to_split_at]
string_before_await = split_text[0: split_text.rindex('await')]
string_after_await = split_text[ split_text.rindex('await') + len('await') : ]
if build_paren_stack(string_after_await) != "":
return( 0)
new_await_var = "var " + temp_var_name + "_LATER = " + front_paren_stack + split_text[ split_text.rindex('await') + len('await') : ]
# if len( str.strip(string_before_await)) > 0:
# string_before_await = string_before_await + " " + temp_var_name + "_LATER"
# if build_paren_stack(string_before_await) != "": # the addition of the varname doesn't change this functionality
# # don't even split the text any more
# node_to_split.text = [[timing_pre_text]+ node_to_split.text[text_ind] + ["\n" + timing_post_text]]
# else:
node_to_move.text[text_ind] = start_text + [new_await_var] + end_text + [end_paren_stack]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_move
node_to_move.child_list.insert( ind_to_split_at, placeholder_node)
node_to_move.text.insert(ind_to_split_at, [""])
# now, add the await to the node to move after
actual_await_node = ParseTreeNode([], None)
actual_await_node.parent = node_to_move_after
actual_await_node.text = [[string_before_await + "await " + temp_var_name + "_LATER"]]
if add_timing:
timing_pre_text = "var TIMING_" + temp_var_name + "_LATER = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(old_pos_node.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "_LATER));\n "
actual_await_node.text = [[timing_pre_text + timing_post_text + actual_await_node.text[0][0]]]
# make room in the parent node, make sure to add it at the end
node_to_move_after.text += [[""]]
node_to_move_after.child_list += [actual_await_node]
return( 1)
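# Sketch of the later-move counterpart (again illustrative JavaScript): the
# promise creation stays at the original position and only the await sinks
# past stmt_to_move_after:
#
#   // before                          // after
#   const x = await fetchData();       var TEMP_LATER = fetchData();
#   doOtherWork();                     doOtherWork();
#                                      const x = await TEMP_LATER;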
def merge_into_first_string_of_list( string_list, to_merge):
if string_list == []:
return( [to_merge])
string_list[0] = to_merge + string_list[0]
return( string_list)
def time_await( root_node, root_map, stmt_to_time, temp_var_name, filename):
node_to_time = root_map[stmt_to_time]
# all we care about here is the await, we're just updating the text
(text_ind, ind_to_split_at) = get_index_with_await(node_to_time.text)
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_time.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically time the following statement: ")
node_to_time.print()
print("DONE REPORTING PROBLEMS")
return( 0)
# split both the text array and the child_list
start_text = node_to_time.text[text_ind][0 : ind_to_split_at]
end_text = node_to_time.text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = node_to_time.text[text_ind][ind_to_split_at]
# string_after_await = split_text[ split_text.index('await') + len('await') : ]
# now, add the new updates to the strings
# don't forget the parens
# string_after_await = "var " + temp_var_name + " = " + front_paren_stack + string_after_await
# always add timing
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(node_to_time.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
new_await_timing_var = "await " + front_paren_stack + split_text[ split_text.index('await') + len('await') : ]
string_before_await = split_text[0: split_text.index('await')]
if len( str.strip(string_before_await)) > 0:
string_before_await = string_before_await + " AWAIT_VAR_TIMING_" + temp_var_name
new_await_timing_var = "var AWAIT_VAR_TIMING_" + temp_var_name + " = " + new_await_timing_var
string_timing_await = timing_pre_text + new_await_timing_var
if build_paren_stack(string_before_await) != "": # the addition of the varname doesnt change this funcionality
# don't even split the text any more
node_to_time.text[text_ind] = [timing_pre_text] + node_to_time.text[text_ind] + ["\n" + timing_post_text]
else:
node_to_time.text[text_ind] = start_text + [string_timing_await]+ end_text + [end_paren_stack + timing_post_text + string_before_await]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_time
node_to_time.child_list.insert( ind_to_split_at, placeholder_node)
node_to_time.text.insert(ind_to_split_at, [""])
return( 1)
def time_call( root_node, root_map, stmt_to_time, temp_var_name, filename):
node_to_time = root_map[stmt_to_time]
# all we care about here is the await, we're just updating the text
# unlike where we were timing the awaits, now we're actually timing the whole statement and have no
# need to split it. all we do is put timing code around the statement
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(node_to_time.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
placeholder_nodes = (ParseTreeNode([], None), ParseTreeNode([], None))
placeholder_nodes[0].parent = node_to_time
placeholder_nodes[1].parent = node_to_time
node_to_time.child_list.insert( 0, placeholder_nodes[0])
node_to_time.child_list += [placeholder_nodes[1]]
node_to_time.text.insert(0, [timing_pre_text])
node_to_time.text += [[timing_post_text]]
return( 1)
def move_stmt( root_node, root_map, stmt_to_move, stmt_to_move_before):
node_to_move = root_map[stmt_to_move]
node_to_move_before = root_map[stmt_to_move_before]
# updates required:
# remove node_to_move from its parent's child list
# then, add it before stmt_to_move_before
# when we remove node_to_move from the child list:
# -- just replace it with a new, blank node but with the old stmt as the stmt
# -- and, replace it in the node_map
child_list_to_rem_from = node_to_move.parent.child_list
placeholder_node = ParseTreeNode( [], stmt_to_move)
child_list_to_rem_from[child_list_to_rem_from.index(node_to_move)] = placeholder_node
root_map[stmt_to_move] = placeholder_node
child_list_to_add_to = node_to_move_before.parent.child_list
child_list_to_add_to.insert( child_list_to_add_to.index(node_to_move_before), node_to_move)
node_to_move_before.parent.text.insert( child_list_to_add_to.index(node_to_move_before), [""])
node_to_move.parent = node_to_move_before.parent
return( root_node, root_map)
def convert__file_spec_stmt_list_to_tree( stmt_list, file_contents):
# can iterate through the dataframe
# probably need some recursive setup here, but this is going to be the wrapper helper function
# first, make a root statement that encompasses the whole file
root_stmt = Stmt(0, 0, len(file_contents) - 1, len(file_contents[-1]))
[root_node, root_map] = create_subsumed( [root_stmt] + stmt_list, 0, dict([]))[1: ]
return( root_node, root_map)
def create_subsumed( stmt_list, cur_ind, stmt_node_map):
if not cur_ind < len(stmt_list):
raise ValueError("Index must be less than the length of the stmt array")
child_list = []
current_stmt = stmt_list[ cur_ind]
while cur_ind < len(stmt_list) - 1 and current_stmt.subsumes( stmt_list[ cur_ind + 1]):
[cur_ind, next_node, stmt_node_map] = create_subsumed( stmt_list, cur_ind + 1, stmt_node_map)
child_list += [ next_node]
# cur_ind += 1
cur_node = ParseTreeNode( child_list, current_stmt)
stmt_node_map[current_stmt] = cur_node
return( cur_ind, cur_node, stmt_node_map)
def convert_string_to_stmt( row):
stmt_string = row.stmt
stmt_string = stmt_string.split(",")
if len(stmt_string) != 4:
raise ValueError("This string should represent a stmt, which has 4 ints for position")
return( Stmt( int(stmt_string[0]) - 1, int(stmt_string[1]) - 1, int(stmt_string[2]) - 1, int(stmt_string[3])))
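# For example (made-up row): a stmt column of "12,5,14,20" becomes
# Stmt(11, 4, 13, 20), i.e. 0-indexed start/end lines and start char, with the
# end char kept as-is.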
# convert a row of QL output to a list of statements
# we're subtracting 1 from the line numbers since the queries report starting at line 1
# but we need line 0 for the file
def convert_row_to_stmts( row):
[s_startline, s_startchar, s_endline, s_endchar,
ess_startline, ess_startchar, ess_endline, ess_endchar,
lss_startline, lss_startchar, lss_endline, lss_endchar, filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
ess = Stmt( ess_startline - 1, ess_startchar - 1, ess_endline - 1, ess_endchar)
lss = Stmt( lss_startline - 1, lss_startchar - 1, lss_endline - 1, lss_endchar)
return( [s, ess, lss, filename])
def convert_row_to_stmts_with_calls( row):
[s_startline, s_startchar, s_endline, s_endchar,
ess_startline, ess_startchar, ess_endline, ess_endchar,
lss_startline, lss_startchar, lss_endline, lss_endchar, filename,
cs_startline, cs_startchar, cs_endline, cs_endchar, cs_name, cs_filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
ess = Stmt( ess_startline - 1, ess_startchar - 1, ess_endline - 1, ess_endchar)
lss = Stmt( lss_startline - 1, lss_startchar - 1, lss_endline - 1, lss_endchar)
cs = Stmt( cs_startline - 1, cs_startchar - 1, cs_endline - 1, cs_endchar)
return( [s, ess, lss, filename, cs, cs_name, cs_filename])
def convert_row_to_stmt( row):
[s_startline, s_startchar, s_endline, s_endchar, filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
return( [s, filename])
def keep_first_stmt( row1, s1, s1_file):
if row1.file != s1_file: # if they're not in the same file they can't be overlapping
return True
s2 = row1.to_move
s1_before = (s1.start_line < s2.start_line or (s1.start_line == s2.start_line and s1.start_char < s2.start_char))
s1_after_no_overlap = (s1.start_line > s2.start_line or (s1.start_line == s2.start_line and s1.start_char > s2.start_char))
s1_after_no_overlap = s1_after_no_overlap and (s1.end_line > s2.end_line or (s1.end_line == s2.end_line and s1.end_char > s2.end_char))
return( s1_before or s1_after_no_overlap or s1 == s2) # we'll have removed duplicates at this point so
# return array of strings representing the lines of the statement
def get_stmt(stmt, file_contents):
ind = stmt.start_line
if ind == stmt.end_line and (len(file_contents[ind]) < stmt.start_char or stmt.end_char == -1):
return []
# special case for the statement only being one character -- seems to happen with "{" after generic classes
if ind == stmt.end_line and stmt.start_char == stmt.end_char and not (ind == 0 and stmt.start_char == 0): # fake root node
return( [ file_contents[ ind][ stmt.start_char : stmt.end_char + 1 ]])
# special case if the stmt is only on one line
if ind == stmt.end_line:
adds = 1 if len(file_contents[ind]) > stmt.end_char else 0
end_char = ";" if (adds == 1 and file_contents[ ind][ stmt.end_char] == ";") else ""
end_char = "," if (adds == 1 and file_contents[ ind][ stmt.end_char] == ",") else end_char
return( [ file_contents[ ind][ stmt.start_char : stmt.end_char ] + end_char])
stmt_cont = []
if not len(file_contents[ind]) < stmt.start_char:
stmt_cont = [ file_contents[ ind][ stmt.start_char :]]
ind = ind + 1
while ind < stmt.end_line:
stmt_cont += [ file_contents[ ind]]
ind = ind + 1
stmt_cont += [ file_contents[ ind][ 0 : stmt.end_char + 1]]
return( stmt_cont)
# print an array (should be strings), with each array entry on a new line
# here used to print out the contents of a file, post split on newline
def print_array_newline_sep( to_print):
print( ft.reduce( lambda a, b: a + "\n" + b, to_print, ""))
# save a copy of a specified file (name is oldname_old)
def save_old_copy( filename, file_contents):
print( "Modifying -- " + filename + " -- but not saving an old copy")
# file = open( filename + "_old", 'w')
# file.write( file_contents)
# file.close()
def reprocess_file_name( all_stmts):
org_root = "/home/ellen/Documents/odasa/projects/kactus/revision-2020-January-06--15-50-46/src"
new_root = "/home/ellen/Documents/ASJProj/TESTING_reordering/kactus"
all_stmts['file'] = all_stmts.apply( change_string_root, args=(org_root, new_root), axis=1)
def just_add_timing( dat, full_stmts, print_to_file = False, num_to_swap = -1, time_these_calls = None):
if time_these_calls is not None:
add_calls_timing( time_these_calls[ ~ time_these_calls.call_file.isin( dat.file)], full_stmts, print_to_file, -1)
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
perf_hooks_added = [False]
time_one_file( swaps, rnode, rmap, f, do_time, perf_hooks_added)
if time_these_calls is not None:
calls = time_these_calls[ time_these_calls.call_file == f][['call_stmt', 'call_name']]
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, perf_hooks_added)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: join_stmts(a, b), rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def break_stmt( root_node, root_map, stmt_to_break, breaking_text, filename):
node_to_time = root_map[stmt_to_break]
# all we care about here is the await, we're just updating the text
# unlike where we were timing the awaits, now we're actually timing the whole statement and have no
# need to split it. all we do is put timing code around the statement
throws_text = "console.warn(\"" + breaking_text + "\");\n "
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_time
node_to_time.child_list.insert( 0, placeholder_node)
node_to_time.text.insert(0, [throws_text])
return( 1)
def break_one_file( row, rnode, rmap, filename):
to_break = row.to_move
breaking_text = "TEMP_VAR_AUTOGEN_CALLING_" + str(row.name) + "__RANDOM"
if break_stmt( rnode, rmap, to_break, breaking_text, filename) == 0:
return(0)
return(1)
def break_everything( dat, full_stmts, print_to_file = False, num_to_swap = -1):
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
swaps.apply( break_one_file, args=(rnode, rmap, f), axis=1)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: a + "\n" + b, rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def add_calls_timing( dat, full_stmts, print_to_file = False, num_to_swap = -1):
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.call_file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
calls = df[ df.call_file == f][['call_stmt', 'call_name']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, [False]) # always add perf hooks when just timing a file
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: a + "\n" + b, rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def do_swapping( dat, full_stmts, print_to_file = False, num_to_swap = -1, add_timing = False, pre_swap = True, post_swap = False, time_these_calls = None):
# first, do all the timings in files which we don't also do swaps in
if time_these_calls is not None:
add_calls_timing( time_these_calls[ ~ time_these_calls.call_file.isin( dat.file)], full_stmts, print_to_file, -1)
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
# stmt_file_name = f[f.rindex("/") + 1 : -3] + "_stmts.txt" # the last "/" until the end is the root file name, then the -3 gets rid of the .ts or .js
# all_stmts_data = pd.read_csv(stmt_file_name, sep = ',', header=None)
# all_stmts = all_stmts_data.apply(convert_row_to_stmt, axis=1, result_type='expand')
# all_stmts.columns = ['stmt', 'file']
# all_stmts.sort_values(['file', 'stmt'], inplace=True)
# reprocess_file_name( all_stmts)
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
perf_hooks_added = [False] # tracking whether or not we need to add perf_hooks to the file (once it's added once, don't add it again) -- it's an array for pass by ref
if pre_swap:
file_sum = pd.DataFrame()
if not post_swap: # only do the self-swaps if we're not post-swapping
file_sum = deal_with_self_preswaps( swaps[swaps.to_move == swaps.swap_before], rnode, rmap, (add_timing and not post_swap), f)
preswap_one_file( swaps[swaps.to_move != swaps.swap_before], rnode, rmap, (add_timing and not post_swap), f, (0 if file_sum.empty else file_sum.sum()), perf_hooks_added)
if pre_swap and post_swap:
swaps.swap_after = preprocess_df_both_reorders( swaps)
if post_swap: # implies not preswap
deal_with_self_postswaps( swaps[swaps.to_move == swaps.swap_after], rnode, rmap, add_timing, f, perf_hooks_added)
lateswap_one_file( swaps[swaps.to_move != swaps.swap_after], rnode, rmap, add_timing, f, perf_hooks_added)
if time_these_calls is not None:
calls = time_these_calls[ time_these_calls.call_file == f][['call_stmt', 'call_name']]
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, perf_hooks_added)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: join_stmts(a, b), rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def do_self_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
if not add_timing:
if split_single_await( rnode, rmap, to_move, temp_var_name, add_timing, filename) == 0:
return(0)
else:
if time_await( rnode, rmap, to_move, temp_var_name, filename) == 0:
return(0)
return(1)
def deal_with_self_preswaps( self_swaps, rnode, rmap, add_timing = False, filename = ""):
# don't need to do any recursive checking, since this is all going to be self-swaps
# all we need to do is split the await, basically the same work as if we were just adding timing
to_ret = self_swaps.apply( do_self_swap, args=(rnode, rmap, add_timing, filename), axis=1)
if to_ret is not None:
return( to_ret)
return( pd.DataFrame())
# need to split an await statement even if it cant be swapped any earlier, since when we move it
# later the promise creation needs to stay where it was originally
def split_single_await( root_node, root_map, stmt_to_split, temp_var_name, add_timing = False, filename = ""):
node_to_split = root_map[stmt_to_split]
# all we care about here is the await, we're just updating the text
(text_ind, ind_to_split_at) = get_index_with_await(node_to_split.text)
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_split.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically split the following statement: ")
node_to_split.print()
print("DONE REPORTING PROBLEMS")
return( 0)
# split both the text array and the child_list
start_text = node_to_split.text[text_ind][0 : ind_to_split_at]
end_text = node_to_split.text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = node_to_split.text[text_ind][ind_to_split_at]
# adapted from the time_await code, it's basically the same thing but we're not adding timing
# we're just splitting the await
new_await_var = "var " + temp_var_name + " = await " + front_paren_stack + split_text[ split_text.index('await') + len('await') : ]
string_before_await = split_text[0: split_text.index('await')] + " " + temp_var_name
# if build_paren_stack(string_before_await) != "": # the addition of the varname doesn't change this functionality
# # don't even split the text any more
# node_to_split.text = [[timing_pre_text]+ node_to_split.text[text_ind] + ["\n" + timing_post_text]]
# else:
node_to_split.text = [start_text + [new_await_var]+ end_text + [end_paren_stack + string_before_await]]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_split
node_to_split.child_list.insert( ind_to_split_at, placeholder_node)
return( 1)
def deal_with_self_postswaps( self_swaps, rnode, rmap, add_timing = False, filename = "", perf_hooks_added = [False]):
# don't need to do any recursive checking, since this is all going to be self-swaps
# all we need to do is split the await, basically the same work as if we were just adding timing
if add_timing:
results = self_swaps.apply( do_time, args=(rnode, rmap, filename), axis=1)
if not results.empty and results.sum() > 0 and not perf_hooks_added[0]:
req_perf_node = ParseTreeNode( [], None)
req_perf_node.parent = rnode
req_perf_node.text = [["const perf_hooks = require(\'perf_hooks\'); "]]
ind_to_insert = 0
if len(rnode.text) > 0 and len(rnode.text[0]) > 0 and len(rnode.text[0][0]) > 1 and rnode.text[0][0][0:2] == "#!": # cant move above #! command
ind_to_insert = 1
rnode.child_list.insert(ind_to_insert, req_perf_node)
rnode.text.insert(ind_to_insert, [""])
# update to say we've added perf_hooks to this file, and therefore don't need to do it again
perf_hooks_added[0] = True
# replace the root of a string with a new specified root
# throw an exception if the string does not have the root old_root
def change_string_root( row, org_root, new_root, is_call = False):
org_string = ""
if not is_call:
org_string = row.file
else:
org_string = row.call_file
if org_string.index(org_root) != 0:
raise ValueError("The original path " + org_string + " does not have the original root: " + org_root)
return( new_root + org_string[ len(org_root): ])
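# e.g. (paths are illustrative): with org_root="/old/project/src" and
# new_root="/new/project/src", a row whose file is "/old/project/src/app/foo.ts"
# is rewritten to "/new/project/src/app/foo.ts".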
def add_swaps_to_all_stmt( all_stmts, swap_df):
# since the rmap is indexed by statement object, and we've created the swap_associations and all_stmts dataframes
# separately, we can't index the rmap with the swap_associations
# so, we need to add a column to all_stmts, with the corresponding association but with the right objects
try:
swap_df['to_move'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.to_move].stmt.to_list()[0], axis=1) # can index at 0 since there will only be one
swap_df['swap_before'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.swap_before].stmt.to_list()[0], axis=1)
swap_df['swap_after'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.swap_after].stmt.to_list()[0], axis=1)
except IndexError:
print(swap_df)
def do_early_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
move_before = row.swap_before
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
print(row)
if move_and_split_await( rnode, rmap, to_move, move_before, temp_var_name, add_timing, filename) == 0:
return(0)
return(1)
def do_late_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
move_after = row.swap_after
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
if move_await_later( rnode, rmap, to_move, move_after, temp_var_name, add_timing, filename) == 0:
return(0)
return(1)
def do_time( row, rnode, rmap, filename):
to_time = row.to_move
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
if time_await( rnode, rmap, to_time, temp_var_name, filename) == 0:
return(0)
return(1)
def do_time_call( row, rnode, rmap, filename):
to_time = row.call_stmt
temp_var_name = "TEMP_VAR_AUTOGEN_CALLING_" + str(row.name) + "_" + row.call_name + "__RANDOM"
if time_call( rnode, rmap, to_time, temp_var_name, filename) == 0:
return(0)
return(1)
# function to preprocess a dataframe of swaps for ONE FILE
# if we're doing both forward and backward swapping, there's no guarantee that
# a statement can swap down to something below where another statement is swapping up
# and, since there is no dependency check between these statements, we need to conservatively
# assume that there is a dependency
# solution: set the swap_after to be the min of swap_after and the swap_befores of any stmts
# which themselves are > current statement and their swap_befores are > current_stmt
def preprocess_df_both_reorders( swap_df):
# for each statement
return(swap_df.apply( get_late_swap_for_row, args=( swap_df, ), axis=1))
def get_late_swap_for_row( row, df):
to_consider = df[(df.to_move > row.swap_after) & (df.swap_before > row.to_move)]
if to_consider.empty:
return( row.swap_after)
earliest_later_up = to_consider.swap_before.min()
return( min( row.swap_after, earliest_later_up))
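# Worked example with invented positions: statement A may sink down to
# swap_after = stmt@L10, while statement B sits at stmt@L12 (> A's swap_after)
# and may rise up to swap_before = stmt@L7 (> A itself). B then qualifies for
# to_consider, so A's swap_after is clamped to min(stmt@L10, stmt@L7) = stmt@L7
# and the two reorderings can no longer cross each other.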
def swap_condition( results, tsum):
if results.empty and tsum > 0:
return True
elif results.empty:
return False
elif results.sum() > 0:
return True
return False
def preswap_one_file( swap_df, rnode, rmap, add_timing = False, filename = "", tsum = 0, perf_hooks_added=[False], counter=0):
recursive_swaps = swap_df[swap_df.swap_before.isin(swap_df.to_move)]
if not recursive_swaps.empty:
preswap_one_file( recursive_swaps, rnode, rmap, add_timing, filename, tsum, perf_hooks_added, counter + 1)
swap_df = pd.concat([recursive_swaps, swap_df]).drop_duplicates(keep=False)
results = swap_df.apply( do_early_swap, args=(rnode, rmap, add_timing, filename), axis=1)
# don't need to time anymore since timing will be added during post-swap
if add_timing and counter == 0 and swap_condition(results, tsum) and not perf_hooks_added[0]:
req_perf_node = ParseTreeNode( [], None)
req_perf_node.parent = rnode
req_perf_node.text = [["const perf_hooks = require(\'perf_hooks\'); "]]
ind_to_insert = 0
if len(rnode.text) > 0 and len(rnode.text[0]) > 0 and len(rnode.text[0][0]) > 1 and rnode.text[0][0][0:2] == "#!":
ind_to_insert = 1
rnode.child_list.insert(ind_to_insert, req_perf_node)
rnode.text.insert(ind_to_insert, [""])
# update to say we've added perf_hooks to this file, and therefore don't need to do it again
perf_hooks_added[0] = True
# don't add it to the rmap since it doesn't really have a corresponding statement
def lateswap_one_file( swap_df, rnode, rmap, add_timing = False, filename = "", perf_hooks_added = [False], counter=0):
recursive_swaps = swap_df[swap_df.swap_after.isin(swap_df.to_move)]
if not recursive_swaps.empty:
lateswap_one_file( recursive_swaps, rnode, rmap, add_timing, filename, perf_hooks_added, counter + 1)
swap_df = pd.concat([recursive_swaps, swap_df])
"""
Last.FM Datasets and Helpers.
References:
- [Last.FM Dataset 1K](http://ocelma.net/MusicRecommendationDataset/lastfm-1K.html)
- [Lenskit datasets](https://github.com/lenskit/lkpy/blob/master/lenskit/datasets.py)
"""
import logging
import os
import pandas as pd
from skipgrammar.datasets.common import (UserItemIterableDataset, cached,
get_file)
logger = logging.getLogger(__name__)
VARIANTS = {
"lastfm-50": {
"origin": "https://github.com/eifuentes/lastfm-dataset-1K/releases/download/v1.0/lastfm-dataset-50.snappy.parquet",
"filename": "lastfm-dataset-50.snappy.parquet",
},
"lastfm-1k": {
"origin": "https://github.com/eifuentes/lastfm-dataset-1K/releases/download/v1.0/lastfm-dataset-1k.snappy.parquet",
"filename": "lastfm-dataset-1k.snappy.parquet",
},
}
USER_PROFILE = {
"origin": "https://github.com/eifuentes/lastfm-dataset-1K/releases/download/v1.0/userid-profile.tsv.zip",
"filename": "userid-profile.tsv.zip",
"extract": {"filename": "userid-profile.tsv"},
}
class LastFM:
"""
Last.FM datasets access class, including lastmf-50 and lastfm-1k.
Parameters:
listens_filepath (str): Filepath to the parquet file containing the user listening history dataset.
user_profile_filepath (str): Filepath to the tab seperated (.tsv) file containing the user profile dataset.
"""
def __init__(self, listens_filepath, user_profile_filepath):
self.listens_filepath = listens_filepath
self.user_profile_filepath = user_profile_filepath
@cached
def listens(self):
"""
The listens table.
>>> lfdset = LastFM(listens_filepath='data/lastfm-dataset-50.snappy.parquet', ...)
"""
listens = pd.read_parquet(self.listens_filepath)
import numpy as np
import pandas as pd
import pickle
import time
import random
import os
from sklearn import linear_model, model_selection, ensemble
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.base import clone
from sklearn import metrics
from sklearn.model_selection import cross_validate, train_test_split, StratifiedKFold
import sklearn.metrics as m
from joblib import Parallel, delayed
from sklearn.utils import shuffle, resample
type_='marker'
basename = type_+'_features_expired_prediction_'
dir_ = '../../data/'
t0_all=time.time()
seed = 42
np.random.seed(seed)
max_depth = 1
C=1
tol=1e-3
min_samples_leaf=2
min_samples_split=2
n_estimators=100
models = {
"Logistic Regression" : linear_model.LogisticRegression(
C=C,
penalty='l1',
solver="liblinear",
tol=tol,
random_state=seed)
}
classification_metrics = ['roc_auc']
cv_split = 10
test_size = 0.15
n_jobs = 25
nboot=200
X_all_proteins = pd.read_csv(dir_+'integrated_X_raw_all_proteins.csv',index_col=0)
proteins_no_immunoglobulins = pickle.load(open(dir_+'proteins_no_immunoglobulins.pkl','rb'))
X_all_proteins = X_all_proteins.loc[:,proteins_no_immunoglobulins]
joined = pd.read_csv(dir_+'mortality_X_y.csv',index_col=0)
X_all_clinical = pd.read_csv(dir_+'integrated_X_clinical_and_cohort_covariates.csv',index_col=0)
Y_pgd = pd.read_csv(dir_+'integrated_pgd_y.csv',index_col=0,header=None)
"""
All rights reserved to cnvrg.io
http://www.cnvrg.io
SKTrainer.py
==============================================================================
"""
import os
import pickle
import numpy as np
import pandas as pd
from cnvrg import Experiment
from cnvrg.charts import Bar, MatrixHeatmap, Scatterplot
from sklearn.model_selection import cross_validate
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score, mean_absolute_error
class SKTrainerRegression:
DIGITS_TO_ROUND = 3
REGRESSION_TYPE = ['linear', 'logistic']
def __init__(self, model, train_set, test_set, output_model_name, testing_mode, folds=None, regression_type=0):
self.__model = model
self.__x_train, self.__y_train = train_set
self.__train_set_size = len(self.__y_train)
self.__x_test, self.__y_test = test_set
self.__test_set_size = len(self.__y_test)
self.__testing_mode = testing_mode
self.__cross_val_folds = folds
self.__is_cross_val = (folds is not None)
self.__features = list(self.__x_train.columns)
self.__labels = [str(l) for l in list(set(self.__y_train).union(set(self.__y_test)))]
self.__metrics = {'model': output_model_name}
self.__y_pred = None
self.__experiment = Experiment()
self.__regression_type = SKTrainerRegression.REGRESSION_TYPE[regression_type]
self.__coef, self.__intercept = None, None
def run(self):
self.__model.fit(self.__x_train, self.__y_train)
try: self.__coef = self.__model.coef_
except AttributeError: pass
try: self.__intercept = self.__model.intercept_
except AttributeError: pass
if self.__is_cross_val:
self.__metrics['folds'] = self.__cross_val_folds
if self.__is_cross_val is True:
self.__train_with_cross_validation()
else:
self.__train_without_cross_validation()
self.__save_model()
def __plot_all(self, y_test_pred):
self.__plot_accuracies_and_errors()
# self.__plot_regression_function()
self.__plot_feature_importance()
self.__plot_correlation_matrix()
# self.__plot_feature_vs_feature()
def __train_with_cross_validation(self):
"""
This method enables sk-learn algorithms to perform KFold-cross-validation.
The method also initiates the cnvrg experiment with all its metrics.
"""
scores = cross_validate(estimator=self.__model,
X=self.__x_train,
y=self.__y_train,
cv=self.__cross_val_folds,
return_train_score=True,
scoring=['neg_mean_squared_error', 'neg_mean_absolute_error', 'r2', 'accuracy'],
return_estimator=True)
train_err_cv_mse = (-1) * scores['train_neg_mean_squared_error']
train_err_cv_mae = (-1) * scores['train_neg_mean_absolute_error']
train_err_cv_r2 = scores['train_r2']
val_acc_cv = scores['test_accuracy']
val_err_cv_mse = (-1) * scores['test_neg_mean_squared_error']
val_err_cv_mae = (-1) * scores['test_neg_mean_absolute_error']
val_err_cv_r2 = scores['test_r2']
self.__model = scores['estimator'][-1]
self.__y_pred = self.__model.predict(self.__x_test)
test_acc = accuracy_score(self.__y_test, self.__y_pred)
test_loss = mean_squared_error(self.__y_test, self.__y_pred)
self.__metrics.update({
'train_loss_mae': train_err_cv_mae,
'train_loss_mse': train_err_cv_mse,
'train_loss_r2': train_err_cv_r2,
'validation_acc': val_acc_cv,
'val_loss_mae': val_err_cv_mae,
'val_loss_mse': val_err_cv_mse,
'val_loss_r2': val_err_cv_r2,
'test_acc': test_acc,
'test_loss_mse': test_loss})
self.__plot_all(self.__y_pred)
def __train_without_cross_validation(self):
"""
The method also initiates the cnvrg experiment with all its metrics.
"""
y_hat = self.__model.predict(self.__x_train) # y_hat is a.k.a y_pred
train_loss_MSE = mean_squared_error(self.__y_train, y_hat)
train_loss_MAE = mean_absolute_error(self.__y_train, y_hat)
train_loss_R2 = r2_score(self.__y_train, y_hat)
self.__y_pred = self.__model.predict(self.__x_test)
test_loss_MSE = mean_squared_error(self.__y_test, self.__y_pred)
test_loss_MAE = mean_absolute_error(self.__y_test, self.__y_pred)
test_loss_R2 = r2_score(self.__y_test, self.__y_pred)
self.__metrics.update({
'train_loss_mae': train_loss_MAE,
'train_loss_mse': train_loss_MSE,
'train_loss_r2': train_loss_R2,
'test_loss_mse': test_loss_MSE,
'test_loss_mae': test_loss_MAE,
'test_loss_r2': test_loss_R2})
self.__plot_all(self.__y_pred)
def __plot_regression_function(self):
if self.__regression_type == 'linear':
a, b = self.__coef[0], self.__intercept
x = np.linspace(-100, 100, 200)
y = a * x + b
elif self.__regression_type == 'logistic':
x = np.linspace(-100, 100, 200)
y = 1 / (1 + np.exp(-x))
self.__experiment.log_metric(key="Regression Function", Xs=x.tolist(), Ys=y.tolist(), grouping=['regression line'] * len(x))
def __plot_feature_importance(self):
try:
importance = getattr(self.__model, "feature_importances_")
if self.__testing_mode is False:
self.__experiment.log_chart('Feature Importance', x_axis='Features', y_axis='Importance', data=Bar(x=self.__features, y=importance))
else:
print(importance)
except AttributeError:
pass
def __plot_accuracies_and_errors(self):
if self.__testing_mode is True:
print("Model: {model}\n"
"train_acc={train_acc}\n"
"train_loss={train_loss}\n"
"test_acc={test_acc}\n"
"test_loss={test_loss}".format(
model=self.__metrics['model'], train_acc=self.__metrics['train_acc'], train_loss=self.__metrics['train_loss'],
test_acc=self.__metrics['test_acc'], test_loss=self.__metrics['test_loss']))
if self.__is_cross_val is True:
print("Folds: {folds}\n".format(folds=self.__metrics['folds']))
else: # testing mode is off.
for k, v in self.__metrics.items():
self.__plot_accuracies_and_errors_helper()
if isinstance(v, list):
self.__experiment.log_metric(k, v)
else:
self.__experiment.log_param(k, v)
def __plot_accuracies_and_errors_helper(self):
for k, v in self.__metrics.items():
if isinstance(v, float):
self.__metrics[k] = round(self.__metrics[k], SKTrainerRegression.DIGITS_TO_ROUND)
def __save_model(self):
output_model_name = self.__metrics['model']
output_file_name = os.environ.get("CNVRG_WORKDIR") + "/" + output_model_name if os.environ.get("CNVRG_WORKDIR") \
is not None else output_model_name
pickle.dump(self.__model, open(output_file_name, 'wb'))
"""training & testing methods"""
def __plot_correlation_matrix(self):
data = pd.concat([pd.concat([self.__x_train, self.__x_test], axis=0), pd.concat([self.__y_train, self.__y_test], axis=0)], axis=1)
correlation = data.corr()
self.__experiment.log_chart("correlation", [MatrixHeatmap(np.round(correlation.values, 2))],
x_ticks=correlation.index.tolist(), y_ticks=correlation.index.tolist())
def __plot_feature_vs_feature(self):
data = pd.concat([pd.concat([self.__x_train, self.__x_test], axis=0), pd.concat([self.__y_train, self.__y_test], axis=0)], axis=1)
# Usage: python gt-gen-vac-fixed-num-cbgs-crossgroup.py [--quick_test]
# (quick_test is a store_true flag; omit it for a full run)
import setproctitle
setproctitle.setproctitle("gnn-simu-vac@chenlin")
import os
import datetime
import pandas as pd
import numpy as np
import pickle
import random
import argparse
import constants
import functions
import disease_model #disease_model_only_modify_attack_rates
import time
import pdb
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--epic_data_root', default='/data/chenlin/COVID-19/Data',
help='TBA')
parser.add_argument('--gt_result_root', default=os.path.abspath(os.path.join(os.pardir,'data/safegraph')),
help='TBA')
parser.add_argument('--vaccination_ratio', type=float, default=0.02,
help='Vaccination ratio (w.r.t. total population).')
parser.add_argument('--vaccination_time', type=int, default=0,
help='Vaccination time.')
parser.add_argument('--protection_rate', type=float, default=1,
help='Protection rate.')
parser.add_argument('--min_datetime', default=datetime.datetime(2020, 3, 1, 0),
help='Start date & time.')
parser.add_argument('--max_datetime', default=datetime.datetime(2020, 5, 2, 23),
help='End date & time.')
parser.add_argument('--num_days', type=int, default=63,
help='Num of simulation days.')
parser.add_argument('--num_groups', type=int, default=3,
help='Num of groups in each demographic dimension.')
parser.add_argument('--msa_name', default='SanFrancisco',
help='MSA name.')
parser.add_argument('--random_seed', type=int, default=42,
help='Random seed.')
parser.add_argument('--NN', type=int, default=70,
help='Num of CBGs to receive vaccines.')
parser.add_argument('--num_experiments', type=int, default=100,
help='Num of randombags (i.e., random strategies).')
parser.add_argument('--quick_test', default=False, action='store_true',
help='Quick Test: prototyping.')
parser.add_argument('--proportional', default=True,
help='If true, divide vaccines proportional to cbg populations.')
parser.add_argument('--grouping', default=False, action='store_true',
help='If true, only generate samples containing CBGs from the same demographic group.')
args = parser.parse_args()
# Constants
#gt_result_root = os.path.abspath(os.path.join(os.pardir,'data/safegraph')); print('gt_result_root: ', gt_result_root)
###############################################################################
# Main variable settings
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[args.msa_name] #MSA_NAME_FULL = 'San_Francisco_Oakland_Hayward_CA'
# Random Seed
print('Random_seed:',args.random_seed)
# Divide the available vaccines to how many CBGs
print('Num of CBGs to receive vaccines: ', args.NN)
# Number of randombag experiments
print('Number of randombag experiments: ', args.num_experiments)
# Quick Test: prototyping
print('Quick testing?', args.quick_test)
if args.quick_test: NUM_SEEDS = 2
else: NUM_SEEDS = 40 #30 #60
print('NUM_SEEDS: ', NUM_SEEDS)
STARTING_SEED = range(NUM_SEEDS)
if(args.proportional==True):
extra_string = 'proportional'
else:
extra_string = 'identical'
# Store filename
if(args.grouping):
filename = os.path.join(args.gt_result_root, args.msa_name,
'vac_results_%s_%s_%s_randomseed%s_%sseeds_%ssamples_%s.csv'
%(args.msa_name,args.vaccination_ratio,args.NN,args.random_seed,NUM_SEEDS, args.num_experiments, extra_string))
else:
filename = os.path.join(args.gt_result_root, args.msa_name,
'crossgroup_vac_results_%s_%s_%s_randomseed%s_%sseeds_%ssamples_%s.csv'
%(args.msa_name,args.vaccination_ratio,args.NN,args.random_seed,NUM_SEEDS, args.num_experiments, extra_string))
print('filename: ', filename)
if(os.path.exists(filename)):
print('This file already exists. Better have a check?')
pdb.set_trace()
# Compared filename (existing data) #20220130
#compared_filename = os.path.join(args.gt_result_root, args.msa_name,
# 'vac_results_SanFrancisco_0.02_70_randomseed42_40seeds_1000samples_proportional.csv')
#print('Compared_filename: ', filename)
# Compute mean, safegap of existing samples.
#exist_data = pd.read_csv(compared_filename)
###############################################################################
# Functions
def run_simulation(starting_seed, num_seeds, vaccination_vector, vaccine_acceptance,protection_rate=1):
m = disease_model.Model(starting_seed=starting_seed,
num_seeds=num_seeds,
debug=False,clip_poisson_approximation=True,ipf_final_match='poi',ipf_num_iter=100)
m.init_exogenous_variables(poi_areas=poi_areas,
poi_dwell_time_correction_factors=poi_dwell_time_correction_factors,
cbg_sizes=cbg_sizes,
poi_cbg_visits_list=poi_cbg_visits_list,
all_hours=all_hours,
p_sick_at_t0=constants.parameters_dict[args.msa_name][0],
#vaccination_time=24*31, # when to apply vaccination (which hour)
vaccination_time=24*args.vaccination_time, # when to apply vaccination (which hour)
vaccination_vector = vaccination_vector,
vaccine_acceptance=vaccine_acceptance,
protection_rate = protection_rate,
home_beta=constants.parameters_dict[args.msa_name][1],
cbg_attack_rates_original = cbg_attack_rates_scaled,
cbg_death_rates_original = cbg_death_rates_scaled,
poi_psi=constants.parameters_dict[args.msa_name][2],
just_compute_r0=False,
latency_period=96, # 4 days
infectious_period=84, # 3.5 days
confirmation_rate=.1,
confirmation_lag=168, # 7 days
death_lag=432
)
m.init_endogenous_variables()
T1,L_1,I_1,R_1,C2,D2,total_affected, history_C2, history_D2, total_affected_each_cbg = m.simulate_disease_spread(no_print=True)
del T1
del L_1
del I_1
del C2
del D2
#return total_affected, history_C2, history_D2, total_affected_each_cbg
return history_C2, history_D2
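# Illustrative call (sketch only -- vaccination_vector and vaccine_acceptance are
# per-CBG arrays that the rest of the script is expected to construct):
#   history_C2, history_D2 = run_simulation(starting_seed=STARTING_SEED,
#                                           num_seeds=NUM_SEEDS,
#                                           vaccination_vector=vaccination_vector,
#                                           vaccine_acceptance=vaccine_acceptance,
#                                           protection_rate=args.protection_rate)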
# Hybrid Grouping
def assign_hybrid_group(data):
return (data['Elder_Ratio_Group']*9 + data['Mean_Household_Income_Group']*3 + data['Essential_Worker_Ratio_Group'])
#return (data['Age_Quantile_FOR_RANDOMBAG']*81 + data['Income_Quantile_FOR_RANDOMBAG']*27 + data['EW_Quantile_FOR_RANDOMBAG']*9 + data['Vulner_Quantile_FOR_RANDOMBAG']*3 + data['Damage_Quantile_FOR_RANDOMBAG'])
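# Worked example (num_groups = 3): a CBG in elder-ratio group 2, income group 0
# and essential-worker group 1 is assigned hybrid group 2*9 + 0*3 + 1 = 19;
# the three group indices are packed base-3 into a single id in the range [0, 26].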
###############################################################################
# Load Data
# Load POI-CBG visiting matrices
f = open(os.path.join(args.epic_data_root, args.msa_name, '%s_2020-03-01_to_2020-05-02.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
# Load precomputed parameters to adjust(clip) POI dwell times
d = pd.read_csv(os.path.join(args.epic_data_root,args.msa_name, 'parameters_%s.csv' % args.msa_name))
# No clipping
new_d = d
all_hours = functions.list_hours_in_range(args.min_datetime, args.max_datetime)
poi_areas = new_d['feet'].values  # POI area (square feet)
poi_dwell_times = new_d['median'].values  # median dwell time (minutes)
poi_dwell_time_correction_factors = (poi_dwell_times / (poi_dwell_times+60)) ** 2
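# Numeric example of the correction factor above: a 60-minute median dwell time
# yields (60 / (60 + 60)) ** 2 = 0.25, while a 180-minute dwell yields 0.5625.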
del new_d
del d
# Load ACS Data for MSA-county matching
acs_data = pd.read_csv(os.path.join(args.epic_data_root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
good_list = list(msa_data['FIPS Code'].values)  # print('CBG included: ', good_list)
del acs_data
# Load CBG ids for the MSA
cbg_ids_msa = pd.read_csv(os.path.join(args.epic_data_root,args.msa_name,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
num_cbgs = len(cbg_ids_msa)
print('Number of CBGs in this metro area:', num_cbgs)
# Mapping from cbg_ids to columns in hourly visiting matrices
cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(num_cbgs)))
x = {}
for i in cbgs_to_idxs:
x[str(i)] = cbgs_to_idxs[i]
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(args.epic_data_root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
cbg_age_msa = pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left')
del cbg_agesex
# Add up males and females of the same age, according to the detailed age list (DETAILED_AGE_LIST)
# which is defined in constants.py
for i in range(3,25+1): # 'B01001e3'~'B01001e25'
male_column = 'B01001e'+str(i)
female_column = 'B01001e'+str(i+24)
cbg_age_msa[constants.DETAILED_AGE_LIST[i-3]] = cbg_age_msa.apply(lambda x : x[male_column]+x[female_column],axis=1)
# Rename
cbg_age_msa.rename(columns={'B01001e1':'Sum'},inplace=True)
# Extract columns of interest
columns_of_interest = ['census_block_group','Sum'] + constants.DETAILED_AGE_LIST
cbg_age_msa = cbg_age_msa[columns_of_interest].copy()
# Deal with NaN values
cbg_age_msa.fillna(0,inplace=True)
# Deal with CBGs with 0 populations
cbg_age_msa['Sum'] = cbg_age_msa['Sum'].apply(lambda x : x if x!=0 else 1)
cbg_age_msa['Elder_Absolute'] = cbg_age_msa.apply(lambda x : x['70 To 74 Years']+x['75 To 79 Years']+x['80 To 84 Years']+x['85 Years And Over'],axis=1)
cbg_age_msa['Elder_Ratio'] = cbg_age_msa['Elder_Absolute'] / cbg_age_msa['Sum']
# Obtain cbg sizes (populations)
cbg_sizes = cbg_age_msa['Sum'].values
cbg_sizes = np.array(cbg_sizes,dtype='int32')
print('Total population: ',np.sum(cbg_sizes))
# Select counties belonging to the MSA
y = []
for i in x:
if((len(i)==12) & (int(i[0:5])in good_list)):
y.append(x[i])
if((len(i)==11) & (int(i[0:4])in good_list)):
y.append(x[i])
idxs_msa_all = list(x.values())
# Load other demographic data
# Income
filepath = os.path.join(args.epic_data_root,"ACS_5years_Income_Filtered_Summary.csv")
cbg_income = pd.read_csv(filepath)
# Drop duplicate column 'Unnamed:0'
cbg_income.drop(['Unnamed: 0'],axis=1, inplace=True)
# Income Data Resource 1: ACS 5-year (2013-2017) Data
# Extract CBGs belonging to the metro area, by merging dataframes
cbg_income_msa = pd.merge(cbg_ids_msa, cbg_income, on='census_block_group', how='left')
del cbg_income
# Deal with NaN values
cbg_income_msa.fillna(0,inplace=True)
# Add information of CBG populations, from cbg_age_msa (cbg_b01.csv)
cbg_income_msa['Sum'] = cbg_age_msa['Sum'].copy()
# Rename
cbg_income_msa.rename(columns = {'total_household_income':'Total_Household_Income',
'total_households':'Total_Households',
'mean_household_income':'Mean_Household_Income',
'median_household_income':'Median_Household_Income'},inplace=True)
# Occupation
filepath = os.path.join(args.epic_data_root,"safegraph_open_census_data/data/cbg_c24.csv")
cbg_occupation = pd.read_csv(filepath)
# Extract CBGs belonging to the metro area, by merging dataframes
cbg_occupation_msa = pd.merge(cbg_ids_msa, cbg_occupation, on='census_block_group', how='left')
del cbg_occupation
columns_of_essential_workers = list(constants.ew_rate_dict.keys())
for column in columns_of_essential_workers:
cbg_occupation_msa[column] = cbg_occupation_msa[column].apply(lambda x : x*constants.ew_rate_dict[column])
cbg_occupation_msa['Essential_Worker_Absolute'] = cbg_occupation_msa.apply(lambda x : x[columns_of_essential_workers].sum(), axis=1)
cbg_occupation_msa['Sum'] = cbg_age_msa['Sum']
cbg_occupation_msa['Essential_Worker_Ratio'] = cbg_occupation_msa['Essential_Worker_Absolute'] / cbg_occupation_msa['Sum']
columns_of_interest = ['census_block_group','Sum','Essential_Worker_Absolute','Essential_Worker_Ratio']
cbg_occupation_msa = cbg_occupation_msa[columns_of_interest].copy()
# Deal with NaN values
cbg_occupation_msa.fillna(0,inplace=True)
##############################################################################
# Load and scale age-aware CBG-specific attack/death rates (original)
cbg_death_rates_original = np.loadtxt(os.path.join(args.epic_data_root,args.msa_name, 'cbg_death_rates_original_'+args.msa_name))
cbg_attack_rates_original = np.ones(cbg_death_rates_original.shape)
# The scaling factors are set according to a grid search
attack_scale = 1 # Fix attack_scale
cbg_attack_rates_scaled = cbg_attack_rates_original * attack_scale
cbg_death_rates_scaled = cbg_death_rates_original * constants.death_scale_dict[args.msa_name]
###############################################################################
# Collect data together
data = pd.DataFrame()
import os
import re
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from urllib.error import URLError
from .util import download_file, get_path, timer
#todo: using flags in ncdc data to filter
#todo: extra filter for bad data (999s etc)
STATION_LIST_PATH = get_path(__file__, 'isd-history.txt')
DOWNLOAD_DATA_SUBFOLDER_NAME = 'downloaded_data'
class Downloader(object):
def __init__(self, lat=None, long=None, radius_km=50, date_from=None, date_to=None):
self.latitude = lat
self.longitude = long
self.distance = radius_km
self.date_from = datetime(*date_from) if isinstance(date_from, tuple) else date_from
self.date_to = datetime(*date_to) if isinstance(date_to, tuple) else date_to
self.station_id_list = []
self.station_info = None
self.station_data = None
@staticmethod
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points on the earth (specified in decimal degrees).
Inputs in degrees. The result is in km.
"""
# todo speedup with pd.map
# convert decimal degrees to radians
lon1_rad, lat1_rad, lon2_rad, lat2_rad = map(np.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2_rad - lon1_rad
dlat = lat2_rad - lat1_rad
a = np.power(np.sin(dlat / 2), 2) + np.cos(lat1_rad) * np.cos(lat2_rad) * np.power(np.sin(dlon / 2), 2)
c = 2 * np.arcsin(np.sqrt(a))
km = 6371 * c
return km
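# Example (illustrative): Downloader.haversine(-122.4, 37.8, -122.3, 37.9)
# returns roughly 14 km -- two points about 0.1 degrees apart near 38 N.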
@staticmethod
def rh_from_dew_temperature(t, t_dew, simple=False):
if simple:
rh = 5 * (t_dew - t) + 100
else:
T = np.add(t, 273.15)
Td = np.add(t_dew, 273.15)
L_Rv = 5423
rh = np.exp(L_Rv * (1 / T - 1 / Td)) * 100
return rh
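# Context for the two approximations above:
#  - simple=True is the rule of thumb that RH drops ~5% per degree C of
#    dew-point depression, i.e. RH = 100 - 5 * (t - t_dew).
#  - simple=False uses a Clausius-Clapeyron-type ratio with L/Rv ~= 5423 K.
# Example: rh_from_dew_temperature(25.0, 20.0) gives ~73%; with simple=True, 75%.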
class NCDC_Downloader(Downloader):
@staticmethod
def update_station_list():
url = 'ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt'
download_file(url=url, save_to=STATION_LIST_PATH)
@staticmethod
def _load_stations_metadata():
colspecs = [(0, 7), (7, 13), (13, 43), (43, 48), (48, 51), (51, 57),
(57, 65), (65, 74), (74, 82), (82, 91), (91, 999)]
df = pd.read_fwf(STATION_LIST_PATH, skiprows=range(20), colspecs=colspecs, parse_dates=[9, 10])
# df.index = [self._convert_between_id_and_usaf_wban(usaf=r['USAF'], wban=r['WBAN']) for i, r in df.iterrows()]
return df
@staticmethod
def _convert_between_id_and_usaf_wban(id=None, usaf=None, wban=None):
if id:
usaf, wban = [float(i) for i in id.split('-')]
return usaf, wban
else:
id = '{:0>6.0f}-{:0>5.0f}'.format(usaf, wban)
return id
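# Formatting example: _convert_between_id_and_usaf_wban(usaf=724940, wban=23234)
# returns '724940-23234', while passing id='724940-23234' returns (724940.0, 23234.0).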
def find_stations_nearby(self):
assert self.latitude and self.longitude and self.distance
stations = self._load_stations_metadata()
stations = stations[pd.notnull(stations['LAT']) & pd.notnull(stations['LON'])]
# calculate distance from target
apply_haversine = lambda r: self.haversine(lon1=self.longitude, lat1=self.latitude,
lon2=r['LON'], lat2=r['LAT'])
stations['Distance'] = stations.apply(apply_haversine, axis=1)
# filter for valid stations
distance_mask = stations['Distance'] <= self.distance
timerange_mask = pd.Series(True, index=stations.index)  # elementwise mask aligned with stations
if self.date_from:
timerange_mask &= stations['BEGIN'] <= self.date_from
if self.date_to:
timerange_mask &= stations['END'] >= self.date_to
stations_nearby = stations[distance_mask & timerange_mask].copy()  # copy to allow assigning a new index below
# adding ids as index
get_ids = lambda r: self._convert_between_id_and_usaf_wban(usaf=r['USAF'], wban=r['WBAN'])
id_list = stations_nearby.apply(get_ids, axis=1).tolist()
stations_nearby.index = id_list
# storing valid stations
self.station_info = stations_nearby
self.station_id_list = id_list
return self.station_id_list
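# Typical usage (hypothetical coordinates and dates, shown only as a sketch):
#   dl = NCDC_Downloader(lat=37.77, long=-122.42, radius_km=50,
#                        date_from=(2018, 1, 1), date_to=(2018, 12, 31))
#   station_ids = dl.find_stations_nearby()
#   files = dl.download_ncdc_station_data(station_ids[0])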
def download_ncdc_station_data(self, station_id, date_from=None, date_to=None):
# defining data range
if date_from is None or date_to is None:
info = self.station_info.loc[station_id, :]
date_from = info['BEGIN'] if date_from is None else date_from
date_to = info['END'] if date_to is None else date_to
# downloading data
downloaded_file_list = []
for year in range(date_from.year, date_to.year + 1):
file_name = '{id}-{year}.gz'.format(id=station_id, year=year)
url = 'ftp://ftp.ncdc.noaa.gov/pub/data/noaa/{year}/{file_name}'.format(year=year, file_name=file_name)
save_to = os.path.join(DOWNLOAD_DATA_SUBFOLDER_NAME, file_name)
try:
download_file(url=url, save_to=save_to)
downloaded_file_list.append(save_to)
except URLError:
print('!!! File {} not found. download skipped. !!!'.format(file_name))
return downloaded_file_list
def _parse_ncdc_data(self, fpath):
header_names = ['total', 'USAF', 'WBAN', 'datetime', 'source', 'latitude', 'longitude', 'report_type',
'elevation',
'call_letter_id', 'quality_control', 'direction', 'direction_quality', 'observation',
'speed_times_10',
'speed_quality', 'sky', 'sky_quality', 'sky_determination', 'sky_cavok_code', 'visibility',
'visibility_quality', 'visibility_variability', 'visibility_variability_quality', 'temperature',
'temperature_quality', 'temperature_dew', 'temperature_dew_quality',
'pressure_sea_level', 'pressure_quality']
colspecs = [(0, 4), (4, 10), (10, 15), (15, 27), (27, 28), (28, 34), (34, 41), (41, 46), (46, 51),
(51, 56), (56, 60), (60, 63), (63, 64), (64, 65), (65, 69), (69, 70), (70, 75), (75, 76), (76, 77),
(77, 78), (78, 84), (84, 85), (85, 86), (86, 87), (87, 92), (92, 93), (93, 98), (98, 99), (99, 104),
(104, 105)]
compression = 'gzip' if fpath.endswith('.gz') else 'infer'
df = pd.read_fwf(fpath, colspecs=colspecs, header=None, names=header_names, index_col=3,
parse_dates=True, compression=compression)
return df
def merge_ncdc_data(self, downloaded_file_list):
# collating all data into one parsed dataframe
df_collated = pd.DataFrame()
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
soal = ['EXT1', 'EXT2', 'EXT3', 'EXT4', 'EXT5', 'EXT6', 'EXT7', 'EXT8', 'EXT9', 'EXT10', 'EST1', 'EST2', 'EST3', 'EST4', 'EST5', 'EST6', 'EST7', 'EST8', 'EST9', 'EST10', 'AGR1', 'AGR2', 'AGR3', 'AGR4', 'AGR5',
'AGR6', 'AGR7', 'AGR8', 'AGR9', 'AGR10', 'CSN1', 'CSN2', 'CSN3', 'CSN4', 'CSN5', 'CSN6', 'CSN7', 'CSN8', 'CSN9', 'CSN10', 'OPN1', 'OPN2', 'OPN3', 'OPN4', 'OPN5', 'OPN6', 'OPN7', 'OPN8', 'OPN9', 'OPN10']
# Responses use a 1-5 scale, re-centered here to -2..2 (i.e. response minus 3)
answer = [1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 2.0, -2.0, -1.0, 1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 0.0, -1.0, 1.0, 2.0, 1.0, 1.0,
1.0, -0.0, -1.0, -1.0, 0.0, 1.0, 0.0, -1.0, 0.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 2.0, 2.0, 0.0, 1.0, 2.0]
df = pd.DataFrame(answer, index=soal)
df = df.T
# Question analysis (apply the reverse-scoring key for each item)
# EXT
df['EXT1'] = df['EXT1'] * 1
df['EXT2'] = df['EXT2'] * -1
df['EXT3'] = df['EXT3'] * 1
df['EXT4'] = df['EXT4'] * -1
df['EXT5'] = df['EXT5'] * 1
df['EXT6'] = df['EXT6'] * -1
df['EXT7'] = df['EXT7'] * 1
df['EXT8'] = df['EXT8'] * -1
df['EXT9'] = df['EXT9'] * 1
df['EXT10'] = df['EXT10'] * -1
# EST
df['EST1'] = df['EST1'] * 1
df['EST2'] = df['EST2'] * -1
df['EST3'] = df['EST3'] * 1
df['EST4'] = df['EST4'] * -1
df['EST5'] = df['EST5'] * 1
df['EST6'] = df['EST6'] * 1
df['EST7'] = df['EST7'] * 1
df['EST8'] = df['EST8'] * 1
df['EST9'] = df['EST9'] * 1
df['EST10'] = df['EST10'] * 1
# AGR
df['AGR1'] = df['AGR1'] * -1
df['AGR2'] = df['AGR2'] * 1
df['AGR3'] = df['AGR3'] * -1
df['AGR4'] = df['AGR4'] * 1
df['AGR5'] = df['AGR5'] * -1
df['AGR6'] = df['AGR6'] * -1
df['AGR7'] = df['AGR7'] * 1
df['AGR8'] = df['AGR8'] * -1
df['AGR9'] = df['AGR9'] * 1
df['AGR10'] = df['AGR10'] * 1
# CSN
df['CSN1'] = df['CSN1'] * 1
df['CSN2'] = df['CSN2'] * -1
df['CSN3'] = df['CSN3'] * 1
df['CSN4'] = df['CSN4'] * -1
df['CSN5'] = df['CSN5'] * 1
df['CSN6'] = df['CSN6'] * -1
df['CSN7'] = df['CSN7'] * 1
df['CSN8'] = df['CSN8'] * -1
df['CSN9'] = df['CSN9'] * 1
df['CSN10'] = df['CSN10'] * 1
# OPN
df['OPN1'] = df['OPN1'] * 1
df['OPN2'] = df['OPN2'] * -1
df['OPN3'] = df['OPN3'] * 1
df['OPN4'] = df['OPN4'] * -1
df['OPN5'] = df['OPN5'] * 1
df['OPN6'] = df['OPN6'] * -1
df['OPN7'] = df['OPN7'] * 1
df['OPN8'] = df['OPN8'] * 1
df['OPN9'] = df['OPN9'] * 1
df['OPN10'] = df['OPN10'] * 1
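# The 50 per-item multiplications above can be expressed more compactly with a
# reverse-keying table. The sketch below is a hypothetical alternative and is not
# used by the rest of this script; the signs mirror the assignments above.
reverse_keyed_items = {
    'EXT': [2, 4, 6, 8, 10],
    'EST': [2, 4],
    'AGR': [1, 3, 5, 6, 8],
    'CSN': [2, 4, 6, 8],
    'OPN': [2, 4, 6],
}
def apply_reverse_keying(frame):
    # Multiply only the reverse-keyed items by -1; all other items keep their sign.
    out = frame.copy()
    for trait, items in reverse_keyed_items.items():
        for i in items:
            out[trait + str(i)] = out[trait + str(i)] * -1
    return out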
# Model
col_list = list(df)
ext = col_list[0:10]
est = col_list[10:20]
agr = col_list[20:30]
csn = col_list[30:40]
opn = col_list[40:50]
# Classify --> add 3 so the trait scores are back on the 1-5 range
df_total = pd.DataFrame()
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
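# For reference, _make_func_use_binop1('+') compiles and returns a function
# equivalent to:
#
#     def test_impl(A, B):
#         return A + B
#
# and _make_func_use_method_arg1('add') returns one equivalent to
# `lambda A, B: A.add(B)`.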
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
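# Editor's note (hedged sketch, not the original source): the original assertion is
# truncated here. With skipna=False pandas propagates NaN, so both the jitted and the
# reference implementation return NaN for this series; a reasonable check (an assumption,
# not taken from the source) would compare NaN-ness, e.g.
#     self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))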
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import psycopg2.extensions
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool, Value
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Persistant():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, worker_type, data_tables=[],operations_tables=[]):
self.db_schema = None
self.helper_schema = None
self.worker_type = worker_type
#For database functionality
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Persistant.ROOT_AUGUR_DIR
# count of tuples inserted in the database ( to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host')
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warn('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
#add credentials to db config. Goes to databaseable
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
#Return string representation of an object with all information needed to recreate the object (Think of it like a pickle made out of text)
#Called using repr(*object*). eval(repr(*object*)) == *object*
def __repr__(self):
return f"{self.config['id']}"
def initialize_logging(self):
#Get the log level in upper case from the augur config's logging section.
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
#Use stock python formatter for stdout
formatter = Formatter(fmt=format_string)
#Use a custom formatter for stderr; gives more info than verbose_format_string
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
#Create more complex sublogs in the logfile directory determined by the AugurLogging class
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
#database interface, the git interfaceable adds additional function to the super method.
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
self.db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.db_schema)})
# , 'client_encoding': 'utf8'
self.helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.logger.info("Trying to find max id of table...")
try:
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
except Exception as e:
self.logger.info(f"Could not find max id. ERROR: {e}")
#25151
#self.logger.info(f"Good, passed the max id getter. Max id: {self.history_id}")
#Make sure the type used to store date is synced with the worker?
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
## Getting rid of nan's and NoneTypes across the dataframe to start:
subject = subject.fillna(value=numpy.nan)
source = source.fillna(value=numpy.nan)
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
## Dealing with an error coming from paginate endpoint and the GitHub issue worker
### For a release in mid september, 2021. #SPG This did not work on Ints or Floats
# if type(source[source_columns[index]].values[0]).isnull():
# subject[subject_columns[index]] = pd.fillna(value=np.nan)
# source[source_columns[index]] = pd.fillna(value=np.nan)
# continue
source_index = source_columns[index]
try:
source_index = source_columns[index]
type_dict[subject_columns[index]] = type(source[source_index].values[0])
#self.logger.info(f"Source data column is {source[source_index].values[0]}")
#self.logger.info(f"Type dict at {subject_columns[index]} is : {type(source[source_index].values[0])}")
except Exception as e:
self.logger.info(f"Source data registered exception: {source[source_index]}")
self.print_traceback("", e, True)
subject = subject.astype(type_dict)
return subject, source
#Map a python/numpy value to an appropriate SQLAlchemy column type
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
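# Illustrative mappings for get_sqlalchemy_type (editor's comments, derived only from the
# branches above; calls shown without the `self.` prefix for brevity):
#   get_sqlalchemy_type("2020-01-01T00:00:00Z")          -> s.types.TIMESTAMP
#   get_sqlalchemy_type("some text")                     -> s.types.String
#   get_sqlalchemy_type(7)                               -> s.types.BigInteger
#   get_sqlalchemy_type(3.5)                             -> s.types.Float
#   get_sqlalchemy_type(3.0, column_name="issue_id")     -> s.types.BigInteger
#   get_sqlalchemy_type(pd.Timestamp("2020-01-01"))      -> s.types.TIMESTAMP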
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None)
return df
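# Editor's illustration (commented out; not part of the original worker code): a minimal
# standalone sketch of the float -> nullable-int conversion used above. The DataFrame
# below is made up purely for demonstration.
#   import numpy, pandas as pd
#   demo = pd.DataFrame({"id": [1.0, 2.0, numpy.nan], "ratio": [0.5, 1.5, 2.5]})
#   col = "id"
#   if demo[col].dtype == float and ((demo[col] % 1 == 0) | (demo[col].isnull())).all():
#       demo[col] = demo[col].astype("Int64").astype(object).where(pd.notnull(demo[col]), None)
#   # demo["id"] becomes [1, 2, None]; "ratio" keeps its fractional floats untouched.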
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 22:28:42 2018
@author: Erkin
"""
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# T VALUE FUNCTION
#def t_ind(quotes, tgt_margin=0.2, n_days=30):
# quotes=quotes[['date','lowest_newprice']]
# quotes=quotes.reset_index(drop=True)
#
# t_matrix=pd.DataFrame(quotes.date).iloc[0:len(quotes)-n_days,]
# t_matrix['T']=0
# t_matrix['decision']='hold'
# for i in range(len(quotes)-n_days):
# a=quotes.iloc[i:i+n_days,:]
# a['first_price']=quotes.iloc[i,1]
# a['variation']=(a.lowest_newprice-a.first_price)/a.first_price
# t_value=len(a[(a.variation>tgt_margin)]) - len(a[(a.variation<-tgt_margin)])
# t_matrix.iloc[i,1]=t_value
# if (t_value > 10):
# t_matrix.iloc[i,2]='buy'
# elif(t_value < -10):
# t_matrix.iloc[i,2]='sell'
#
# plt.subplot(2, 1, 1)
# dates = matplotlib.dates.date2num(t_matrix.date)
# plt.plot_date(dates, t_matrix['T'],linestyle='solid', marker='None')
# plt.title(' T vs time ')
# plt.xlabel('Time')
# plt.ylabel('T value')
#
#
# plt.subplot(2, 1, 2)
# dates = matplotlib.dates.date2num(quotes.iloc[0:len(quotes)-n_days,].date)
# plt.plot_date(dates, quotes.iloc[0:len(quotes)-n_days,]['lowest_newprice'],linestyle='solid', marker='None')
# plt.xlabel('Time')
# plt.ylabel('price')
# plt.show()
# plt.show()
# return t_matrix
#
# FUNCTION ENDS
# importing necessary datasets.
product_info=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_info.csv')
product_info=product_info.drop('Unnamed: 0',axis=1)
product_info_head=product_info.head(1000)
product=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product.csv')
product=product.drop('Unnamed: 0',axis=1)
product_head=product.head(1000)
map_product=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/map_product.csv')
map_product=map_product.drop('Unnamed: 0',axis=1)
map_product_head=map_product.head(1000)
product_answer=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_answer.csv')
product_answer=product_answer.drop('Unnamed: 0',axis=1)
product_answer_head=product_answer.head(1000)
product_question=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_question.csv')
product_question=product_question.drop('Unnamed: 0',axis=1)
product_question_head=product_question.head(1000)
product_review=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/product_review.csv')
product_review=product_review.drop('Unnamed: 0',axis=1)
product_review_head=product_review.head(1000)
merged=pd.read_csv('/Users/Erkin/Desktop/McGill/personal project/data/merged.csv')
merged=merged.drop('Unnamed: 0',axis=1)
merged_head=merged.head(1000)
#product names
#product_names=product.iloc[:,1:3]
#merged=pd.merge(product_names,product_info,how='right',on='asin')
#merged_head=merged.head(1000)
#
##lowest price na replacement
#merged=merged.drop('Unnamed: 0',axis=1)
#merged['lowest_newprice']=merged['lowest_newprice'].fillna(merged['list_price'])
#merged_head=merged.head(1000)
#merged.isna().sum()
#removing values with less than 200 observations.
#asd=merged.groupby(['asin']).count()
#asd=asd[asd.date > 200]
#asd.reset_index(level=0, inplace=True)
#merged=merged[merged.asin.isin(asd.asin)]
#merged=merged.reset_index(drop=True)
#merged['date'] = pd.to_datetime(merged['date']).dt.date
#
#unique_asins=merged.asin.unique()
#merged['T']=99
#for asin in unique_asins:
# print(asin)
# quotes=merged[merged.asin==asin]
# iterable=quotes.iloc[0:len(quotes)-n_days,]
# for i, row in iterable.iterrows():
# a=quotes.loc[i:i+n_days,:]
# a['first_price']=quotes.loc[i,'lowest_newprice']
# a['variation']=(a.lowest_newprice-a.first_price)/a.first_price
# t_value=len(a[(a.variation>tgt_margin)]) - len(a[(a.variation<-tgt_margin)])
# merged.loc[i,'T']=t_value
#asins=merged.asin.unique().tolist()
#product0=t_ind(quotes=merged[merged.asin==asins[0]])
#product1=t_ind(quotes=merged[merged.asin==asins[1]])
#product2=t_ind(quotes=merged[merged.asin==asins[2]])
#product3=t_ind(quotes=merged[merged.asin==asins[3]])
#product4=t_ind(quotes=merged[merged.asin==asins[4]])
#product5=t_ind(quotes=merged[merged.asin==asins[5]])
#product6=t_ind(quotes=merged[merged.asin==asins[6]])
#
## Create the time index
#product6.set_index('date', inplace=True)
#ts=product6.drop('decision',axis=1)
#
#
## Verify the time index
#product6.head()
#product6.info()
#product6.index
## Run the AutoRegressive model
#from statsmodels.tsa.ar_model import AR
#ar1=AR(ts)
#model1=ar1.fit()
## View the results
#print('Lag: %s' % model1.k_ar)
#print('Coefficients: %s' % model1.params)
#
## Separate the data into training and test
#split_size = round(len(ts)*0.3)
#ts_train,ts_test = ts[0:len(ts)-split_size], ts[len(ts)-split_size:]
#
## Run the model again on the training data
#ar2=AR(ts_train)
#model2=ar2.fit()
#
## Predicting the outcome based on the test data
#ts_test_pred_ar = model2.predict(start=len(ts_train),end=len(ts_train)+len(ts_test)-1,dynamic=False)
#ts_test_pred_ar.index=ts_test.index
#
## Calculating the mean squared error of the model
#from sklearn.metrics import mean_squared_error
#error = mean_squared_error(ts_test,ts_test_pred_ar)
#print('Test MSE: %.3f' %error)
#
## Plot the graph comparing the real value and predicted value
#from matplotlib import pyplot
#fig = plt.figure(dpi=100)
#pyplot.plot(ts_test)
#pyplot.plot(ts_test_pred_ar)
#df_dateasin=merged[['date','asin']]
#
#
#
#reviews_sorted=product_review.sort_values('review_date')
#reviews_sorted['number_of_reviews']=reviews_sorted.groupby(['asin','review_date']).cumcount()+1
#reviews_sorted['star_tot']=reviews_sorted.groupby('asin').star.cumsum()
#reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
#df_dateasin = df_dateasin.drop_duplicates(['asin','date'], keep='last')
#df_dateasin.columns=['review_date','asin']
#reviews_sorted = pd.merge(df_dateasin,reviews_sorted,how='left')
#reviews_sorted_head=reviews_sorted.head(1000)
#
#
#
#
#
#reviews_sorted['reviews_total']=reviews_sorted.groupby('asin').number_of_reviews.cumsum()
#reviews_sorted['star_avg']=reviews_sorted.star_tot/reviews_sorted.number_of_reviews
#reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
#t1=reviews_sorted[['review_date','asin','number_of_reviews','star_avg']]
#t1.columns=['date','asin','number_of_reviews','star_avg']
#merged2 = pd.merge(merged,t1,how='left')
#
#t2 = pd.merge(df_dateasin,t1,how='left')
#
#nul = merged2['number_of_reviews'].isnull()
#nul.groupby((nul.diff() == 1).cumsum()).cumsum()*3 + merged2['number_of_reviews'].ffill()
#
# FEATURE ENGINEERING
# aggregation of number of reviews and average star rating
reviews_sorted=product_review.set_index('review_date').sort_index()
reviews_sorted['number_of_reviews']=reviews_sorted.groupby('asin').cumcount()+1
reviews_sorted['star_tot']=reviews_sorted.groupby('asin').star.cumsum()
reviews_sorted=reviews_sorted.reset_index()
reviews_sorted['star_avg']=reviews_sorted.star_tot/reviews_sorted.number_of_reviews
reviews_sorted = reviews_sorted.drop_duplicates(['asin','review_date'], keep='last')
t1=reviews_sorted[['review_date','asin','number_of_reviews','star_avg']]
t1.columns=['date','asin','number_of_reviews','star_avg']
merged2 = pd.merge(merged,t1,how='left')
merged2['number_of_reviews']=merged2.groupby('asin').number_of_reviews.fillna(method='ffill')
merged2['number_of_reviews']=merged2.groupby('asin').number_of_reviews.fillna(method='bfill')
merged2['star_avg']=merged2.groupby('asin').star_avg.fillna(method='ffill')
merged2['star_avg']=merged2.groupby('asin').star_avg.fillna(method='bfill')
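# Editor's sketch (illustrative only; the _demo_* names below are made up and not used
# anywhere else in this script): how cumcount/cumsum produce the running review count and
# running star average per product, which is the pattern applied above.
_demo_reviews = pd.DataFrame({'asin': ['A', 'A', 'A', 'B'], 'star': [5, 3, 4, 2]})
_demo_reviews['number_of_reviews'] = _demo_reviews.groupby('asin').cumcount() + 1
_demo_reviews['star_tot'] = _demo_reviews.groupby('asin').star.cumsum()
_demo_reviews['star_avg'] = _demo_reviews.star_tot / _demo_reviews.number_of_reviews
# asin 'A' rows get number_of_reviews 1, 2, 3 and star_avg 5.0, 4.0, 4.0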
# hold and buy
merged2_head=merged2.head(10000)
df_pred = merged2[merged2['T'] < 40]
df_pred_head=df_pred.head(10000)
df_pred['decision']=0 #don't buy
df_pred.loc[(df_pred['T']>5),'decision']=1 #buy
df_pred_head=df_pred.head(10000)
#
## price diff
#price_diff=[]
#df_pred['price_diff']=0
#for game in df_pred.asin.unique():
# price_diff.append(0)
# for row in range(1,len(df_pred[df_pred.asin==game])):
# price_diff.append((df_pred.iloc[row,4]-df_pred.iloc[row-1,4])/df_pred.iloc[row-1,4])
#df_pred['price_diff']=price_diff
#df_pred_head=df_pred.head(10000)
#
#
#removing products less than 150 datapoints
asd=df_pred.groupby(['asin']).count()
asd=asd[asd.date > 150]
asd.reset_index(level=0, inplace=True)
df_pred=df_pred[df_pred.asin.isin(asd.asin)]
df_pred=df_pred.reset_index(drop=True)
df_pred=df_pred.dropna(subset=['sales_rank'])
#%%
# BENCHMARK MODEL
#random forest
asins=df_pred.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = df_pred[df_pred.asin==asins[i]]
benchmark_model={}
benchmark_ytest={}
for key, value in d.items():
X=value[['lowest_newprice','total_new','total_used','sales_rank']]
y=value.decision
split_size = round(len(X)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# sfm = SelectFromModel(model, threshold=0.03)
# sfm.fit(X_train, y_train)
# for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
y_test_pred=pd.DataFrame(model.predict(X_test))
test_pred=pd.concat([y_test,y_test_pred],axis=1)
benchmark_ytest[str(key)]=test_pred
from sklearn.metrics import accuracy_score
benchmark_model[str(key)]=accuracy_score(y_test,y_test_pred)
from sklearn.metrics import precision_recall_fscore_support as score
precision, recall, fscore, support = score(y_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(y_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
benchmark_scores=pd.concat([products_df,accuracies_df,precisions_df,recalls_df,fscores_df,supports_df],axis=1)
benchmark_scores=benchmark_scores.dropna()
benchmark_scores=benchmark_scores[benchmark_scores['support_buy']>10]
benchmark_scores.precision_buy.mean() #precision is 52.7%
benchmark_scores.recall_buy.mean() #recall is 38%
benchmark_scores.accuracy.mean() #accuracy is 70%
#%%
#regression
asins=df_pred.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = df_pred[df_pred.asin==asins[i]]
benchmark_model={}
benchmark_ytest={}
for key, value in d.items():
X=value[['lowest_newprice','total_new','total_used','sales_rank']]
y=value['T']
dec=value.decision
split_size = round(len(X)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
dec_train,dec_test=dec[0:len(dec)-split_size], dec[len(dec)-split_size:]
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestRegressor(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# sfm = SelectFromModel(model, threshold=0.03)
# sfm.fit(X_train, y_train)
# for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
y_test_pred=pd.DataFrame(model.predict(X_test))
y_test_pred['decision']=0
y_test_pred.loc[y_test_pred[0]>5,'decision']=1
y_test_pred=y_test_pred.drop([0],axis=1)
test_pred=pd.concat([dec_test,y_test_pred],axis=1)
benchmark_ytest[str(key)]=test_pred
benchmark_model[str(key)]=accuracy_score(dec_test,y_test_pred)
precision, recall, fscore, support = score(dec_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(dec_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
benchmark_scores=pd.concat([products_df,accuracies_df,precisions_df,recalls_df,fscores_df,supports_df],axis=1)
benchmark_scores=benchmark_scores.dropna()
benchmark_scores=benchmark_scores[benchmark_scores['support_buy']>10]
benchmark_scores.precision_buy.mean() #precision is 51% - 57
benchmark_scores.recall_buy.mean() #recall is 56% - 46
benchmark_scores.accuracy.mean() #accuracy is 78% - 71
#%%
# all products (# just a random trial)
#
#X=df_pred[['total_new','total_used','sales_rank','price_diff']]
#y=df_pred.decision
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
#model = randomforest.fit(X_train, y_train)
#
#sfm = SelectFromModel(model, threshold=0.03)
#sfm.fit(X_train, y_train)
#for feature_list_index in sfm.get_support(indices=True):
# print(X_train.columns[feature_list_index])
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#
#from sklearn import metrics
#from sklearn.metrics import classification_report
#
#print("MODEL B1: All Products \n")
#
#print ('The precision for this classifier is ' + str(metrics.precision_score(y_test, y_test_pred)))
#print ('The recall for this classifier is ' + str(metrics.recall_score(y_test, y_test_pred)))
#print ('The f1 for this classifier is ' + str(metrics.f1_score(y_test, y_test_pred)))
#print ('The accuracy for this classifier is ' + str(metrics.accuracy_score(y_test, y_test_pred)))
#
#print ('\nHere is the classification report:')
#print (classification_report(y_test, y_test_pred))
#
#from sklearn.metrics import confusion_matrix
#print(pd.DataFrame(confusion_matrix(y_test, y_test_pred, labels=[1, 0]), index=['true:1', 'true:0'], columns=['pred:1', 'pred:0']))
#
# IMPROVED MODEL
#asins=df_pred.asin.unique().tolist()
#
##Accuracy for product 0
#product0=df_pred[df_pred.asin==asins[0]]
#X=product0[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product0.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
##Accuracy for product 1
#
#product2=df_pred[df_pred.asin==asins[2]]
#X=product2[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product2.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
##Accuracy for product 7
#
#product7=df_pred[df_pred.asin==asins[7]]
#X=product7[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product7.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#accuracy_score(y_test,y_test_pred)
#
#
#
#
#
##Accuracy for product 9
#
#product9=df_pred[df_pred.asin==asins[9]]
#X=product9[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
#y=product9.decision
#
#
#from sklearn.ensemble import RandomForestClassifier
#randomforest = RandomForestClassifier(random_state=0)
#model = randomforest.fit(X, y)
#
#from sklearn.feature_selection import SelectFromModel
#sfm = SelectFromModel(model, threshold=0.05)
#sfm.fit(X, y)
#for feature_list_index in sfm.get_support(indices=True):
# print(X.columns[feature_list_index])
#
#pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient'])
#
#
#from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
#
#model = randomforest.fit(X_train, y_train)
#
#
#y_test_pred=pd.DataFrame(model.predict(X_test))
#
#from sklearn.metrics import accuracy_score
#print(accuracy_score(y_test,y_test_pred))
#y_test_pred['actual']=y_test.reset_index(drop=True)
#
#%%
# MATCHING QUESTION AND ANSWERS WITH PRODUCTS
matching=product[['asin','forum_id']]
product_question_asin = pd.merge(product_question,matching, on=['forum_id'])
matching=product_question_asin[['asin','forum_id','question_id']]
product_answer_asin=pd.merge(product_answer,matching, on=['question_id'])
# FEATURE ENGINEERING FOR QUESTIONS AND ANSWERS
# for questions
questions_sorted=product_question_asin.set_index('question_date').sort_index()
questions_sorted['sentiment_total']=questions_sorted.groupby('asin').sentiment.cumsum()
questions_sorted['number_of_questions']=questions_sorted.groupby('asin').sentiment.cumcount()+1
questions_sorted['sentiment_avg']=questions_sorted.sentiment_total/questions_sorted.number_of_questions
questions_sorted['polarity_total']=questions_sorted.groupby('asin').polarity.cumsum()
questions_sorted['polarity_avg']=questions_sorted.polarity_total/questions_sorted.number_of_questions
questions_sorted['subjectivity_total']=questions_sorted.groupby('asin').subjectivity.cumsum()
questions_sorted['subjectivity_avg']=questions_sorted.subjectivity_total/questions_sorted.number_of_questions
questions_sorted['len_question']=questions_sorted.question.apply(len)
questions_sorted['len_question_total']=questions_sorted.groupby('asin').len_question.cumsum()
questions_sorted['question_lenght_avg']=questions_sorted['len_question_total']/questions_sorted.number_of_questions
questions_sorted=questions_sorted.reset_index()
questions_sorted = questions_sorted.drop_duplicates(['asin','question_date'], keep='last')
questions_useful=questions_sorted[['question_date','asin', 'sentiment_total', 'number_of_questions', 'sentiment_avg',
'polarity_total', 'polarity_avg', 'subjectivity_total',
'subjectivity_avg', 'len_question_total',
'question_lenght_avg']]
questions_useful.columns=['date','asin', 'sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']
merged_ques = pd.merge(df_pred,questions_useful,how='left')
merged_ques[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']]=merged_ques.groupby('asin')[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']].fillna(method='ffill')
merged_ques[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']]=merged_ques.groupby('asin')[['sentiment_total_question', 'number_of_questions', 'sentiment_avg_question',
'polarity_total_question', 'polarity_avg_question', 'subjectivity_total_question',
'subjectivity_avg_question', 'len_question_total',
'question_lenght_avg']].fillna(method='bfill')
merged_ques_head=merged_ques.head(10000)
#for answers
product_answer_sorted=product_answer_asin.set_index('answer_date').sort_index()
product_answer_sorted['number_of_answers']=product_answer_sorted.groupby('asin').cumcount()+1
product_answer_sorted['sentiment_total']=product_answer_sorted.groupby('asin').sentiment.cumsum()
product_answer_sorted['sentiment_avg']=product_answer_sorted.sentiment_total/product_answer_sorted.number_of_answers
product_answer_sorted['polarity_total']=product_answer_sorted.groupby('asin').polarity.cumsum()
product_answer_sorted['polarity_avg']=product_answer_sorted.polarity_total/product_answer_sorted.number_of_answers
product_answer_sorted['subjectivity_total']=product_answer_sorted.groupby('asin').subjectivity.cumsum()
product_answer_sorted['subjectivity_avg']=product_answer_sorted.subjectivity_total/product_answer_sorted.number_of_answers
product_answer_sorted['len_answer']=product_answer_sorted.answer.apply(len)
product_answer_sorted['len_answer_total']=product_answer_sorted.groupby('asin').len_answer.cumsum()
product_answer_sorted['answer_lenght_avg']=product_answer_sorted['len_answer_total']/product_answer_sorted.number_of_answers
product_answer_sorted=product_answer_sorted.reset_index()
product_answer_useful=product_answer_sorted[['answer_date','asin',
'number_of_answers', 'sentiment_total', 'sentiment_avg',
'polarity_total', 'polarity_avg', 'subjectivity_total',
'subjectivity_avg', 'len_answer_total',
'answer_lenght_avg']]
product_answer_useful.columns=['date','asin',
'number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']
merged_ans = pd.merge(merged_ques,product_answer_useful,how='left')
merged_ans[ ['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']]=merged_ans.groupby('asin')[['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']].fillna(method='ffill')
merged_ans[ ['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']]=merged_ans.groupby('asin')[['number_of_answers', 'sentiment_total_answer', 'sentiment_avg_answer',
'polarity_total_answer', 'polarity_avg_answer', 'subjectivity_total_answer',
'subjectivity_avg_answer', 'len_answer_total',
'answer_lenght_avg']].fillna(method='bfill')
merged_ans_head=merged_ans.head(20000)
merged_ans.len_answer_total.isna().sum()
merged_ans_dropedna=merged_ans.dropna()
len(df_pred[df_pred.decision==0])/len(df_pred)
#%%
# #### IMPROVED MODEL ####
#random forest classificiation
## keeping products with more than 150 data points
asd=merged_ans_dropedna.groupby(['asin']).count()
asd=asd[asd.date > 150]
asd.reset_index(level=0, inplace=True)
merged_ans_dropedna=merged_ans_dropedna[merged_ans_dropedna.asin.isin(asd.asin)]
merged_ans_dropedna=merged_ans_dropedna.reset_index(drop=True)
asins=merged_ans_dropedna.asin.unique().tolist()
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
#
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = merged_ans_dropedna[merged_ans_dropedna.asin==asins[i]]
importance = pd.DataFrame()
improved_model={}
improved_ytest={}
for key, value in d.items():
print(key)
X=value[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
# X=value.drop(['asin', 'name', 'date', 'list_price','lowest_usedprice','tradein_value','T','decision'],axis=1)
y=value.decision
## feature selection
randomforest = RandomForestClassifier(random_state=0)
model = randomforest.fit(X, y)
sfm = SelectFromModel(model, threshold=0.01)
sfm.fit(X, y)
for feature_list_index in sfm.get_support(indices=True):
print(X.columns[feature_list_index])
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
print(pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient']).sort_values('Gini coefficient',ascending=False))
temp_importance=pd.DataFrame([list(model.feature_importances_)],columns=X.columns)
key_index=[key]
temp_importance.index = key_index
importance=importance.append(temp_importance)
X_important = pd.DataFrame(sfm.transform(X))
X_important.columns = feature_name
# model
split_size = round(len(X_important)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
# X_train,X_test = X_important[0:len(X_important)-split_size], X_important[len(X_important)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestClassifier(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# prediction
y_test_pred=pd.DataFrame(model.predict(X_test))
test_pred=pd.concat([y_test,y_test_pred],axis=1)
improved_ytest[str(key)]=test_pred
improved_model[str(key)]=accuracy_score(y_test,y_test_pred)
precision, recall, fscore, support = score(y_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(y_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
recalls_df=pd.DataFrame(recalls, columns=['recall_hold','recall_buy'])
fscores_df=pd.DataFrame(fscores, columns=['fscore_hold','fscore_buy'])
supports_df=pd.DataFrame(supports, columns=['support_hold','support_buy'])
improved_scores=pd.concat([products_df,accuracies_df,precisions_df,recalls_df,fscores_df,supports_df],axis=1)
improved_scores=improved_scores.dropna()
improved_scores=improved_scores[improved_scores['support_buy']!=0]
print('precision:',improved_scores.precision_buy.mean()) #precision is 40%
print('recall:',improved_scores.recall_buy.mean()) #recall is 43%
print('accuracy:',improved_scores.accuracy.mean()) #accuracy is 75.3%
# importance dataframe removing zeros
importance=importance[importance.lowest_newprice!=0]
print(importance.mean().sort_values(ascending=False))
#%%
#regression
asd=merged_ans_dropedna.groupby(['asin']).count()
asd=asd[asd.date > 150]
asd.reset_index(level=0, inplace=True)
merged_ans_dropedna=merged_ans_dropedna[merged_ans_dropedna.asin.isin(asd.asin)]
merged_ans_dropedna=merged_ans_dropedna.reset_index(drop=True)
asins=merged_ans_dropedna.asin.unique().tolist()
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
#
products=[]
accuracies=[]
precisions=[]
recalls=[]
fscores=[]
supports=[]
d = {}
for i in range(len(asins)):
d["product" + str(i)] = merged_ans_dropedna[merged_ans_dropedna.asin==asins[i]]
importance = pd.DataFrame()
improved_model={}
improved_ytest={}
for key, value in d.items():
print(key)
X=value[['lowest_newprice','total_new','total_used','sales_rank','number_of_reviews','star_avg']]
# X=value.drop(['asin', 'name', 'date', 'list_price','lowest_usedprice','tradein_value','T','decision'],axis=1)
y=value['T']
dec=value.decision
## feature selection
randomforest = RandomForestRegressor(random_state=0)
model = randomforest.fit(X, y)
sfm = SelectFromModel(model, threshold=0.01)
sfm.fit(X, y)
for feature_list_index in sfm.get_support(indices=True):
print(X.columns[feature_list_index])
feature_idx = sfm.get_support()
feature_name = X.columns[feature_idx]
print(pd.DataFrame(list(zip(X.columns,model.feature_importances_)), columns = ['predictor','Gini coefficient']).sort_values('Gini coefficient',ascending=False))
temp_importance=pd.DataFrame([list(model.feature_importances_)],columns=X.columns)
key_index=[key]
temp_importance.index = key_index
importance=importance.append(temp_importance)
X_important = pd.DataFrame(sfm.transform(X))
X_important.columns = feature_name
# model
split_size = round(len(X_important)*0.3)
X_train,X_test = X[0:len(X)-split_size], X[len(X)-split_size:]
# X_train,X_test = X_important[0:len(X_important)-split_size], X_important[len(X_important)-split_size:]
y_train, y_test = y[0:len(y)-split_size], y[len(y)-split_size:]
y_test=y_test.reset_index(drop=True)
dec_train,dec_test=dec[0:len(dec)-split_size], dec[len(dec)-split_size:]
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 5)
randomforest = RandomForestRegressor(random_state=0,n_estimators=100,max_depth=10)
model = randomforest.fit(X_train, y_train)
# prediction
y_test_pred=pd.DataFrame(model.predict(X_test))
y_test_pred['decision']=0
y_test_pred.loc[y_test_pred[0]>5,'decision']=1
y_test_pred=y_test_pred.drop([0],axis=1)
test_pred=pd.concat([dec_test,y_test_pred],axis=1)
improved_ytest[str(key)]=test_pred
improved_model[str(key)]=accuracy_score(dec_test,y_test_pred)
precision, recall, fscore, support = score(dec_test, y_test_pred)
products.append(key)
accuracies.append(accuracy_score(dec_test,y_test_pred))
precisions.append(precision)
recalls.append(recall)
fscores.append(fscore)
supports.append(support)
products_df=pd.DataFrame({'products':products})
accuracies_df=pd.DataFrame({'accuracy':accuracies})
precisions_df=pd.DataFrame(precisions, columns=['precision_hold','precision_buy'])
import pandas as pd
import numpy as np
from datetime import date, timedelta
from ai4good.utils.logger_util import get_logger
from ai4good.models.validate.model_metrics import model_metrics
logger = get_logger(__name__)
def model_validation_metrics(population:int, model:str, age_categories:list, case_cols:list, df_baseline:pd.DataFrame, df_model:pd.DataFrame, save_output=""):
# Initialise metrics
if model.upper() == "CM":
time_col = "Time"
cols_results = ["age", "case"]
elif model.upper() == "ABM":
time_col = "DAY"
cols_results = ["age", "case"]
elif model.upper() == "NM":
time_col = "Time"
cols_results = ["case"]
cols_overall = [time_col] + case_cols
df_model_metrics = pd.DataFrame(columns=cols_results)
import random
import time
import algorithms # local module
import pandas as pd
from graphics import graphic as simulation_graphic
from process import Process
from pathlib import Path
def get_cpu_time_unit():
"""
a unit of time derived from how long the CPU running this code takes to execute a fixed loop; used for simulating time
"""
started_at = time.time()
for i in range(1, 10000):
if i % 2 == 0:
temp = i / 2
else:
temp = 2 * i
ended_at = time.time()
return ended_at - started_at
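# Editor's note: the value returned here is the wall-clock cost of one simulation "tick";
# the simulator uses it to convert the unitless cpu_total_time into seconds, e.g.
#   cpu_run_time_seconds = cpu_total_time * get_cpu_time_unit()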
class Simulator:
def __init__(self, algorithm: str):
"""
run_time attr is for running algorithm in second
cpu_total_time is total time of running processes for cpu. The number doesn't have any unit
cpu_run_time attr total time of running processes for cpu. unit of time depend on get_cpu_time_unit function
:param algorithm: algorithm name that valid in algorithm list
"""
self.algorithm = algorithm
self.algorithm_class = self.get_algorithm_class()
self.algorithms_list = ["FCFS", "NonPreemptiveSFJ", "PreemptiveSFJ", "RR",
"NonPreemptivePriority", "PreemptivePriority"]
self.processes = []
self.total_process = 0
self.run_time = 0
self.cpu_run_time = 0
self.cpu_total_time = 0
self.cpu_utilization = 0
self.throughput = 0
self.average_waiting_time = 0.0
self.average_turnaround_time = 0.0
self.average_response_time = 0.0
def _compress_df_rows(self, df: pd.DataFrame, column: str) -> pd.DataFrame:
"""
compress your dataframe by grouping on the given column.
for example, if 1000 rows all have a priority value of 0, the other columns
are averaged so those 1000 rows collapse into a single row.
:param df: dataframe whose rows with equal values in the column should be compressed
:param column: the column to compress on
:return: compressed dataframe
"""
# raise error if column not in compress columns
if column not in ('priority', 'burst_time', 'arrival_time'):
raise KeyError("Your column not valid")
df = df.sort_values(column, ignore_index=True)
unique_columns = list(df[column].unique())
data = []
# compress column based on column name
if column == "priority":
# mean over each priority
for c in unique_columns:
result = df.where(df[column] == c).mean(numeric_only=True)
data.append([
result['waiting_time'],
result['turnaround_time'],
result['response_time']
])
# create dataframe
compress_df = pd.DataFrame(
data,
columns=['waiting_time', 'turnaround_time', 'response_time'],
index=unique_columns)
elif column == "burst_time":
# divide the column's value range into 20 buckets and use ranges of numbers instead of single values
pieces = 20 if len(unique_columns) > 20 else 1
batches = len(unique_columns) // pieces
index_ranges = []
for c in range(pieces + 1):
low_range = batches * c
high_range = batches * (c + 1)
# mean over the ranges
result = df.where((low_range <= df[column]) & (df[column] < high_range)).mean(numeric_only=True)
data.append([
result['waiting_time'],
result['turnaround_time'],
result['response_time']
])
# create index ranges for creating index of dataframe
index_ranges.append(f"{low_range}-{high_range}")
# create dataframe
compress_df = pd.DataFrame(
data,
columns=['waiting_time', 'turnaround_time', 'response_time'],
index=index_ranges)
elif column == "arrival_time":
# divide the column's value range into 20 buckets and use ranges of numbers instead of single values
pieces = 20 if len(unique_columns) > 20 else 1
batches = len(unique_columns) // pieces
index_ranges = []
for c in range(pieces + 1):
low_range = batches * c
high_range = batches * (c + 1)
# mean over the ranges
result = df.where((low_range <= df[column]) & (df[column] < high_range)).mean(numeric_only=True)
data.append([
result['waiting_time'],
result['turnaround_time'],
result['response_time']
])
# create index ranges for creating index of dataframe
index_ranges.append(f"{low_range}-{high_range}")
# create dataframe
compress_df = pd.DataFrame(
data,
columns=['waiting_time', 'turnaround_time', 'response_time'],
index=index_ranges)
return compress_df
def set_algorithm(self, algorithm: str) -> bool:
"""
change the algorithm and re-resolve its class
:param algorithm: new algorithm name
:return: True if the algorithm was changed
"""
self.algorithm = algorithm
self.algorithm_class = self.get_algorithm_class()
return True
@staticmethod
def generate_processes_data(
path: str = 'data.csv', size: int = 1000, max_arrival_time: int = 100, max_priority=10, max_burst_time=40
) -> bool:
"""
Generate processes with random arrival time, burst time, and priority, then save them to a csv file
:param max_burst_time: maximum number for random number of burst time
:param max_priority: maximum number for random number of priority
:param max_arrival_time: maximum number for random number of arrival time
:param path: path to save csv file
:param size: number of processes
:return: bool
"""
data = []
# save data as lists of lists then create dataframe. e.g [ [pid1, arrival1], [pid2, arrival2]]
for i in range(size):
data.append([
i + 1, # PID
random.randint(0, max_arrival_time), # arrival_time
random.randint(0, max_priority), # priority
random.randint(0, max_burst_time), # burst_time
])
df = pd.DataFrame(data, columns=['pid', 'arrival_time', 'priority', 'burst_time'])
df.to_csv(path_or_buf=path, index=False)
return True
def read_processes_data(self, path='data.csv', dataframe=None) -> bool:
"""
read data from csv file or pandas dataframe
one of the path or dataframe parameters is required
:param path: string path of your csv file
:param dataframe: you can pass a dataframe object instead of a path
:return: True if everything goes right
"""
if isinstance(dataframe, pd.DataFrame):
df = dataframe
elif path and path.split('.')[-1] == 'csv':
df = pd.read_csv(path)
else:
raise Exception("Your file should be a csv format or pass dataframe object to function")
# clear last loaded processes
self.processes.clear()
for i in range(0, df['pid'].count()):
process = Process(
pid=df['pid'][i],
arrival_time=df['arrival_time'][i],
priority=df['priority'][i],
burst_time=df['burst_time'][i]
)
self.processes.append(process)
return True
def get_algorithm_class(self):
"""
find the algorithm class in the algorithms module based on the algorithm name
:return: the algorithm class
"""
try:
return getattr(algorithms, self.algorithm)
except Exception as e:
print(e)
raise Exception("try again! you have to enter a valid algorithm")
def run(self):
"""
Simulate algorithm and save the result of it
"""
if len(self.processes) == 0:
raise Exception("you have to load processes")
# algorithm instance. need process list object
algorithm = self.algorithm_class(self.processes)
start_time = time.time()
# run algorithm
result = algorithm.run()
end_time = time.time()
self.run_time = end_time - start_time
# set result of simulation
# this processes referred to executed processes in output of algorithm run
processes = result['executed_processes']
total_process = len(processes)
cpu_total_time = result['cpu_total_time']
cpu_run_time = cpu_total_time * get_cpu_time_unit()
throughput = total_process / cpu_total_time
cpu_utilization = (cpu_total_time - result['cpu_idle_time']) / cpu_total_time
average_waiting_time = sum(process.waiting_time for process in processes) / total_process
average_turnaround_time = sum(process.turnaround_time for process in processes) / total_process
average_response_time = sum(process.response_time for process in processes) / total_process
self.total_process = total_process
self.cpu_total_time = cpu_total_time
self.cpu_run_time = cpu_run_time
self.throughput = throughput
self.cpu_utilization = cpu_utilization
self.average_waiting_time = average_waiting_time
self.average_turnaround_time = average_turnaround_time
self.average_response_time = average_response_time
# update processes
self.processes = processes
def save_result_simulation(self):
"""
Save the result of the simulation to results/<algorithm name>.csv
:return: True on success
"""
if not self.cpu_total_time > 0:
print("you have to run an algorithm then save it")
return
self.processes.sort(key=lambda p: p.pid)
data = []
columns = [
# process information
'pid', 'arrival_time', 'priority', 'burst_time', 'waiting_time', 'turnaround_time', 'response_time',
'start_time', 'end_time',
# simulation information
'total_process', 'run_time', 'cpu_total_time', 'cpu_run_time', 'cpu_utilization', 'throughput',
'average_waiting_time', 'average_turnaround_time', 'average_response_time'
]
# save data as lists of lists then create dataframe. e.g [ [pid1, arrival1], [pid2, arrival2]]
for process in self.processes:
data.append([
process.pid,
process.arrival_time,
process.priority,
process.burst_time,
process.waiting_time,
process.turnaround_time,
process.response_time,
process.start_time,
process.end_time,
])
# insert simulation information to just first row
data[0].extend([
self.total_process,
self.run_time,
self.cpu_total_time,
self.cpu_run_time,
self.cpu_utilization,
self.throughput,
self.average_waiting_time,
self.average_turnaround_time,
self.average_response_time
])
df = pd.DataFrame(data, columns=columns)
# handle path in linux and windows
folder_path = Path("results/")
df.to_csv(path_or_buf=folder_path / f"{self.algorithm}.csv", index=False)
return True
def analyze_algorithms(self):
# handle path in linux and windows
folder_path = Path("results/")
columns = [
'average_waiting_time', 'average_turnaround_time', 'average_response_time'
]
algorithms_data = []
exists_algorithms = []
for algo in self.algorithms_list:
try:
df = pd.read_csv(folder_path / f"{algo}.csv", usecols=columns)
algorithms_data.append([
algo, # algorithm name
df['average_waiting_time'][0],
df['average_turnaround_time'][0],
df['average_response_time'][0],
])
exists_algorithms.append(algo)
except FileNotFoundError:
pass
if not algorithms_data:
raise Exception("You have to run one algorithm at least")
columns.insert(0, 'name')
algorithms_df = pd.DataFrame(algorithms_data, columns=columns, index=exists_algorithms)
# author: <NAME>
# date: 2021-11-27
"""This script imports preprocessed test data and fitted Ridge and RandomForestRegressor models.
It then evaluates them on the test set and outputs evaluation metrics to the output directory.
Usage: fit_model.py --source_data=<filepath> --output_dir=<filepath>
Options:
--source_data=<filepath> directory containing transformed data (this is a required option)
--output_dir=<filepath> directory to output figures and tables (this is a required option)
"""
from docopt import docopt
import random
import numpy as np
import pandas as pd
import altair as alt
import sklearn.metrics as sk
import math
import pickle
import scipy.sparse
from sklearn.model_selection import train_test_split
from altair_saver import save
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import RFE, RFECV
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, Ridge, RidgeCV
from sklearn.metrics import make_scorer
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
ShuffleSplit,
cross_val_score,
cross_validate,
train_test_split,
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
OneHotEncoder,
OrdinalEncoder,
PolynomialFeatures,
StandardScaler,
)
from sklearn.pipeline import Pipeline, make_pipeline
opt = docopt(__doc__)
def main(opt):
input_dir = opt['--source_data']
output_dir = opt['--output_dir']
# load test data from input directory
print("loading test data..")
X_transformed_test_sparse = scipy.sparse.load_npz(input_dir + '/x_test_sparse.npz')
X_transformed_test = pd.DataFrame.sparse.from_spmatrix(X_transformed_test_sparse)
y_test = pd.read_csv(input_dir + '/y_test.csv')
feats = pd.read_csv(input_dir + '/feature_names.csv').iloc[:,0]
# load models from pickle files
print("loading fitted models..")
ridge_model = pickle.load(open("results/models/ridge_model.pickle", 'rb'))
rf_model = pickle.load(open("results/models/rf_model.pickle", 'rb'))
# generate predictions on test set
y_pred_ridge = ridge_model.predict(X_transformed_test)
y_pred_rf = rf_model.predict(X_transformed_test)
print("creating tables and figures..")
# create scores dataframe and save it to output directory
r2_ridge = round(sk.r2_score(y_test, y_pred_ridge), 2)
r2_rf = round(sk.r2_score(y_test, y_pred_rf), 2)
rmse = round(math.sqrt(sk.mean_squared_error(y_test, y_pred_ridge)), 2)
rmse_rf = round(math.sqrt(sk.mean_squared_error(y_test, y_pred_rf)), 2)
scores = {
"Metric": ["R2", "RMSE"],
"Ridge Scores": [r2_ridge, rmse],
"Random Forest Scores": [r2_rf, rmse_rf],
}
test_scores = pd.DataFrame(scores)
test_scores.to_csv(output_dir + '/tables/test_scores.csv', index = False)
print("saved model test results to: " + output_dir)
# Plot the predicted values against true values, then save the graph in the output directory
y_data = {
"Ridge predictions": y_pred_ridge,
"y_actual": y_test.iloc[:, 0]
}
salary_data = pd.DataFrame(y_data)
point = alt.Chart(salary_data, title='Ridge regression effectiveness in predicting salary values').mark_circle(opacity = 0.5).encode(
alt.X("y_actual", title="Actual Salary"),
alt.Y("Ridge predictions", title="Predicted Salary")
)
line = pd.DataFrame({
'x': [0, 500000],
'y': [0, 500000],
})
line_plot = alt.Chart(line).mark_line(color= 'red').encode(
x= 'x',
y= 'y',
)
chart = point + line_plot
chart.save(output_dir + "/figures/predicted_vs_actual_chart.png")
print("saved model evaluation chart to: " + output_dir)
# create model coefficient dataframes and save them to the output directory
neg_coefficients_df = pd.DataFrame(data=ridge_model.coef_, index=feats, columns=["coefficient"]).sort_values("coefficient")[:10].reset_index()
neg_coefficients_df.columns = ["Feature", "Coefficient"]
pos_coefficients_df =pd.DataFrame(data=ridge_model.coef_, index=feats, columns=["coefficient"]).sort_values("coefficient", ascending = False)[:10].reset_index()
pos_coefficients_df.columns = ["Feature", "Coefficient"]
ridge_feats = pd.DataFrame(data=ridge_model.coef_, index=feats, columns=["coefficient"]).sort_values(by = "coefficient", ascending = False).reset_index()
rf_feats = pd.DataFrame(data=rf_model.feature_importances_, index=feats, columns=["coefficient"]).sort_values(by = "coefficient", ascending = False).reset_index()
rf_coef_df = pd.DataFrame(rf_feats)
ridge_coef_df = | pd.DataFrame(ridge_feats) | pandas.DataFrame |
import json
import pickle
from datetime import timedelta, datetime
import joblib
import numpy as np
import pandas as pd
class Processor:
def __init__(self, raw_data: dict):
self.df = self.clean(raw_data)
def clean(self, raw_data):
df = pd.DataFrame()
df["Confirmed"] = | pd.DataFrame.from_dict(raw_data, orient="index") | pandas.DataFrame.from_dict |
## Online battery validation
import os
import glob
import pandas as pd
import numpy as np
import pickle
class BESS(object):
def __init__(self, max_energy, max_power, init_soc_proc, efficiency):
self.soc = init_soc_proc
self.max_e_capacity = max_energy
self.efficiency = efficiency
self.energy = self.max_e_capacity * (self.soc)/100
self.power = max_power
def calculate_NLF(self, net_load_day):
""" Net load factor
"""
df = pd.DataFrame(net_load_day).abs()
NLF = df.mean()/df.max()
return NLF[0]
def calculate_SBSPM(self, NR, LE, UE, error=0.01):
"""
Calculates second by second Service Performance Measure (SBSPM)
"""
if (NR >= LE - error) and (NR <= UE + error):
SBSPM = 1
elif (NR > UE + error):
SBSPM = max([1-abs(NR - UE), 0])
elif (NR < LE - error):
SBSPM = max([1-abs(NR - LE), 0])
else:
raise ValueError('The NR is undefined {}'.format(NR))
return SBSPM
def average_SPM_over_SP(self, SBSPM_list):
"""
Averages SPM over Settlement period
"""
SPM = sum(SBSPM_list)/1800  # 1800 one-second scores in a 30-minute settlement period
return SPM
def check_availability(self, SPM):
"""
Returns availability factor
"""
if SPM >= 0.95:
availability_factor = 1
elif (SPM >= 0.75) and (SPM < 0.95):
availability_factor = 0.75
elif (SPM >= 0.5) and (SPM < 0.75):
availability_factor = 0.5
elif (SPM < 0.5):
availability_factor = 0
return availability_factor
def save_to_pickle(name, list_to_save, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'wb') as f:
pickle.dump(list_to_save, f)
return
def load_from_pickle(name, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'rb') as f:
p = pickle.load(f)
return p
import os
import pandas as pd
path = "."
bess_name = "sonnen"
apath = os.path.join(path, 'simulations_{}'.format(bess_name), '{}'.format(1),'agent_{}.csv'.format(1))
nl = pd.read_csv(apath).loc[:,['nl_a{}'.format(1)]]
pb = pd.read_csv(apath).loc[:,['pb_a{}'.format(1)]]
c_reg = pd.read_csv(apath).loc[:,['c_reg_a{}'.format(1)]]
#flex_down = pd.read_csv(apath).loc[:,['flex_down_a{}'.format(1)]]
#flex_up = pd.read_csv(apath).loc[:,['flex_up_a{}'.format(1)]]
apath = os.path.join(path, 'forecasts', bess_name, 'min_power_forecast.csv')
min_efr_forecast = | pd.read_csv(apath, sep=",") | pandas.read_csv |
'''
ilf core utilities
'''
import sys
import re
import math
import json
import io
import pandas as pd
from itertools import chain
import pytricia as pt
from .numbers import IP4PROTOCOLS, IP4SERVICES
# -- Helpers
def lowest_bit(num):
bit, low = -1, (num & -num)
if not low:
return 0
while(low):
low >>= 1
bit += 1
return bit
def binarr(n):
return [n >> i & 1 for i in range(n.bit_length() - 1, -1, -1)]
def is_power2(n):
'check if n is power of 2, note: 2**0 is 1 is valid'
return (n>0 and (n & (n-1) == 0))
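# Illustrative values for the bit helpers above (added comment, not part of the original module):
#   lowest_bit(0b10100) -> 2,  binarr(5) -> [1, 0, 1],  is_power2(8) -> True,  is_power2(6) -> False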
def pp2portstr(port, proto):
'convert port, protocol numbers to port string'
return str(Ival.port_proto(int(port), int(proto)))
class Ip4Protocol(object):
'translate between ipv4 protocol number and associated name'
def __init__(self):
self._num_toname = {} # e.g. 6 -> 'tcp'
self._num_todesc = {} # e.g. 6 -> 'Transmission Control'
self._name_tonum = {} # e.g. 'tcp' -> 6
for k, (name, desc) in IP4PROTOCOLS.items():
self._num_toname[k] = name
self._num_todesc[k] = desc
self._name_tonum[name] = k # TODO: assumes names are unique
def getprotobyname(self, name):
'turn protocol name into its ip protocol number'
err = 'invalid ipv4 protocol name: {!r}'
rv = self._name_tonum.get(name.lower(), None)
if rv is None:
raise ValueError(err.format(name))
return rv
def getnamebyproto(self, num):
'turn ipv4 protocol number into its name'
err = 'invalid ipv4 protocol number {}'
rv = self._num_toname.get(num, None)
if rv is None:
raise ValueError(err.format(num))
return rv
class Ip4Service(object):
'translate between ipv4 service name and associated portstrings'
def __init__(self):
self._service_toports = {} # e.g https -> ['443/tcp', '443/udp']
self._port_toservice = {} # 'port/proto' -> ip4-service-name
for portstr, service in IP4SERVICES.items():
self._port_toservice[portstr] = service
self._service_toports.setdefault(service, []).append(portstr)
def getportsbyserv(self, name):
'translate service name (eg https) to a list of portstrings'
rv = self._service_toports.get(name.lower(), [])
return rv
def getservbyport(self, portstr):
'translate a portstring to a service name'
rv = self._port_toservice.get(portstr.lower(), '')
return rv
def set_service(self, service, portstrings):
'set known ports for a service, eg http->[80/tcp]'
# TODO: check validity, remove spaces etc ...
service = service.strip().lower()
portstrings = [portstr.strip().lower() for portstr in portstrings]
self._service_toports[service] = portstrings
for portstr in portstrings:
self._port_toservice[portstr] = service
IPP = Ip4Protocol() # for use w/ Ival (ipv4 only)
class Ival(object):
'helper class that abstracts PORTSTR or IP'
INVALID, IP, PORTSTR = (0, 1, 2) # types of Ival's
TYPE = {0: 'INVALID', 1: 'IP', 2: 'PORTSTR'}
TYPES = (INVALID, IP, PORTSTR)
def __init__(self, type_, start, length):
'create Ival from specified type & start, length'
self.type = type_
self.start = start
self.length = length
# -- alternate constructors
@classmethod
def ip_pfx(cls, value):
'Create Ival IP from a.b.c.d/e'
if value == 'any':
return cls(cls.IP, 0, 2**32)
x = value.split('/', 1)
err = 'Invalid ip prefix {!r}'
plen = 32 if len(x) == 1 else int(x[1])
if plen < 0 or plen > 32:
raise ValueError(err.format(value))
x = list(map(int, x[0].split('.')))
if len(x) < 1 or len(x) > 4:
raise ValueError(err.format(value))
elif len(x) < 4:
x = (x + [0, 0, 0, 0])[0:4]
for digit in x:
if digit < 0 or digit > 255:
raise ValueError(err.format(value))
return cls(cls.IP, x[0]*2**24 + x[1]*2**16 + x[2]*2**8 + x[3],
2**(32-plen))
@classmethod
def port_pfx(cls, value):
'create Ival PORTSTR from port expressed as prefix a.b.c.d/e'
return Ival.ip_pfx(value).switch(cls.PORTSTR)
@classmethod
def port_str(cls, value):
'Create Ival from <port>/<proto>'
value = value.lower().strip()
err = 'Invalid port string {!r}'
if value == 'any/any' or value == 'any':
return cls(cls.PORTSTR, 0, 2**32)
x = value.split('/') # port(range)/proto-name
if len(x) != 2:
raise ValueError(err.format(value))
x[0:1] = x[0].split('-') # only split port(range) on '-'
x = [y.strip() for y in x]
if len(x) == 2:
# port/proto or any/proto
proto_num = IPP.getprotobyname(x[1])
if x[0] == 'any':
length = 2**16
base = 0
else:
length = 1
base = int(x[0])
if base < 0 or base > 2**16 - 1:
raise ValueError(err.format(value))
return cls(cls.PORTSTR, proto_num * 2**16 + base, length)
elif len(x) == 3:
# start-stop/proto-name
proto_num = IPP.getprotobyname(x[2])
start, stop = int(x[0]), int(x[1])
if start > stop:
start, stop = stop, start
length = stop - start + 1
if start < 0 or start > 2**16 - 1:
raise ValueError(err.format(value))
if stop < 0 or stop > 2**16 - 1:
raise ValueError(err.format(value))
return cls(cls.PORTSTR, proto_num * 2**16 + start, length)
@classmethod
def port_proto(cls, port, proto):
'Create Ival from <port>, <proto>'
port = int(port)
proto = int(proto)
err = 'Invalid port protocol numbers {!r}, {!r}'
if proto < 0 or proto > 255 or port < 0 or port > 2**16 - 1:
raise ValueError(err.format(port, proto))
return cls(cls.PORTSTR, port + proto * 2**16, 1)
# -- comparisons
def __repr__(self):
return '({!r}, {!r})'.format(self.TYPE[self.type], str(self))
def __str__(self):
if self.type == self.IP:
if self.length == 2**32:
return '0.0.0.0/0' # 'any'
elif self.length == 1:
plen = ''
else:
plen = '/{}'.format(32 - int(math.log(
1 + self.length)//math.log(2)))
d1 = (self.start // 2**24) & 0xFF
d2 = (self.start // 2**16) & 0xFF
d3 = (self.start // 2**8) & 0xFF
d4 = (self.start) & 0xFF
return '{}.{}.{}.{}{}'.format(d1, d2, d3, d4, plen)
elif self.type == self.PORTSTR:
if self.length == 2**32:
return 'any/any'
elif self.length == 2**16:
ports = 'any'
elif self.length == 1:
ports = str(self.start & 0xFFFF)
else:
start = self.start & 0xFFFF
ports = '{}-{}'.format(start, start + self.length - 1)
proto = int((self.start // 2**16) & 0xFF)
name = IPP.getnamebyproto(proto)
return '{}/{}'.format(ports, name)
else:
return 'invalid'
def __len__(self):
return self.length
def __contains__(self, other):
return self.type == other.type and\
self.start <= other.start and\
self.start + self.length >= other.start + other.length
def __hash__(self):
'needed because of __eq__, do not modify obj when hashed'
return hash(self.values())
def __ne__(self, other):
return self.values() != other.values()
def __eq__(self, other):
# max intervals (len is 2**32) are equal regardless of start value
if self.type == other.type and self.length == 2**32:
return other.length == self.length
return self.values() == other.values()
def __lt__(self, other):
return self.values() < other.values()
def __le__(self, other):
'self starts to the left of other or is smaller'
return self.values() <= other.values()
def __gt__(self, other):
'self starts to the right of other'
return self.values() > other.values()
def __ge__(self, other):
'self starts to the right of other'
return self.values() >= other.values()
def __iter__(self):
'iterate through the interval with new ivals of len=1'
self.idx = -1
return self
def __next__(self):
self.idx += 1
if self.idx < self.length:
return Ival(self.type, self.start + self.idx, 1)
raise StopIteration
# -- methods
def values(self, values=None):
'get the values of the ival object'
return (self.type, self.start, self.length)
def is_valid(self):
'return True if valid, False otherwise'
if self.type not in self.TYPES:
return False
if self.start < 0 or self.start > 2**32 - 1:
return False
if self.length < 0 or self.length > 2**32 - 1:
return False
return True
def prefix(self):
'return an new IP-typed Ival for this ival'
ival = self.network()
ival.type = Ival.IP
return ival
def network(self):
'return new ival for the first value'
# keeps the prefix (ival) length, only mask start if its IP
# is a no-op for types != 'IP' (!)
mask = 2**32 - self.length
start = self.start & mask if self.type == Ival.IP else self.start
return Ival(self.type, start, self.length)
def broadcast(self):
'return new ival for the last value'
# TODO: Ival('0/0').broadcast() == Ival('255.255.255.255') ??
# should broadcast yield an address/32 or address/pfxlen ??
imask = self.length - 1
start = self.start | imask if self.type == Ival.IP else self.start
return Ival(self.type, start, self.length)
def address(self):
'return new ival with length 1 for start value'
return Ival(self.type, self.start, 1)
def mask(self):
'return the mask as quad dotted string'
if self.type == self.IP:
mask = 2**32 - self.length
d1 = (mask // 2**24) & 0xFF
d2 = (mask // 2**16) & 0xFF
d3 = (mask // 2**8) & 0xFF
d4 = (mask) & 0xFF
return '{}.{}.{}.{}'.format(d1, d2, d3, d4)
raise ValueError('type {!r} not a prefix'.format(self.TYPE[self.type]))
def imask(self):
'return the inverse mask as quad dotted string'
if self.type == self.IP:
imask = self.length - 1
d1 = (imask // 2**24) & 0xFF
d2 = (imask // 2**16) & 0xFF
d3 = (imask // 2**8) & 0xFF
d4 = (imask) & 0xFF
return '{}.{}.{}.{}'.format(d1, d2, d3, d4)
raise ValueError('type {!r} not a prefix'.format(self.TYPE[self.type]))
def is_any(self):
return self.length == 2**32 # any-interval has max length
def port(self):
'return new Ival with type set as PORTSTR'
return Ival(Ival.PORTSTR, self.start, self.length)
def switch(self, ival_type):
'switch Ival.type to ival_type'
if ival_type not in self.TYPES:
raise ValueError('Unknown Ival type {!r}'.format(ival_type))
self.type = ival_type
return self
# -- summarization
def splice(self, ival_type=None):
'return a list of new prefix-like intervals, override type if given'
if ival_type and ival_type not in self.TYPES:
raise ValueError('Unknown Ival type {!r}'.format(ival_type))
rv = []
start, length = self.start, self.length
ival_type = ival_type if ival_type else self.type
maxx = start + length
while start < maxx:
lbit = lowest_bit(start)
hbit = length.bit_length()
maxlen = 2**lbit
newlen = maxlen if length > maxlen else 2**(hbit-1)
rv.append((start, newlen))
start, length = start + newlen, length - newlen
return [Ival(ival_type, x, y) for x, y in rv]
@classmethod
def combine(cls, x, y):
'if possible, return a combined ival, None otherwise'
# border cases
if x is None and y is None:
return None
elif y is None:
return cls(*x.values())
elif x is None:
return cls(*y.values())
elif x.type != y.type:
return None
# x,y two valid Ivals of same type
# - intervals are the same
if x == y:
return cls(*x.values())
# - interval inside the other interval
if x in y:
return cls(*y.values())
if y in x:
return cls(*x.values())
# ensure x starts to the left of y
x, y = (x, y) if x.start <= y.start else (y, x)
# type dependent situations
if x.type == cls.PORTSTR:
# combine adjacent intervals
if x.start + x.length == y.start:
return cls(x.type, x.start, x.length + y.length)
# combine partially overlapping intervals
if x.start + x.length > y.start:
ivlen = max(x.start + x.length, y.start + y.length) - x.start
return cls(x.type, x.start, ivlen)
if x.type == cls.IP:
# pfxs can only be combined if:
# - intervals are adjacent
# - lengths are equal
# - lowest start address does not change with doubling of mask
if x.length == y.length and x.start + x.length == y.start:
# x.start MUST be the network() address of the ival!
if x.start == x.start & (2**32 - 2*x.length):
return cls(x.type, x.start, 2*x.length)
return None # no joy
@classmethod
def summary(cls, ivals):
'summarize a (heterogeneous) list of port/prefix-intervals'
# reverse since this sorts on type, start & length in ascending order
# originals go back on the heap, new ivals go onto rv
heap = list(reversed(sorted(i.network() for i in ivals)))
rv = []
while len(heap):
x = heap.pop()
y = heap.pop() if len(heap) else None
if y:
z = cls.combine(x, y) # z is None if not combined
if z:
heap.append(z) # combined range back on heap
continue # start again
else:
heap.append(y) # push back for later combine attempt
y = rv.pop() if len(rv) else None
if y:
z = cls.combine(x, y) # y is None when x combines x+y
if z:
heap.append(z) # combined range back on heap
else:
rv.append(y) # could not combine, both goto rv and
rv.append(x) # make sure to keep rv ordering intact
else:
rv.append(x)
return [Ival(*i.values()) for i in rv] # ensure new objs are returned
@classmethod
def pfx_summary(cls, ivals):
'summarize the IP-s in ivals, returns only IP-pfxs'
return cls.summary(i for i in ivals if i.type == cls.IP)
@classmethod
def port_summary(cls, ivals):
'summarize the PORTSTR-s in ivals, returns only PORTSTRs'
return cls.summary(i for i in ivals if i.type == cls.PORTSTR)
@classmethod
def portpfx_summary(cls, ivals):
'summarize PORTSTR-s and return them as ip prefixes'
PORTSTR, IP = cls.PORTSTR, cls.IP
portpfxs = [y for x in ivals if x.type==PORTSTR for y in x.splice(IP)]
return cls.summary(portpfxs)
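# Illustrative Ival usage (added sketch; the prefixes and ports are arbitrary examples):
#   Ival.ip_pfx('10.0.0.0/30')       -> ('IP', '10.0.0.0/30'), an interval of 4 addresses
#   Ival.port_str('80/tcp')          -> ('PORTSTR', '80/tcp')
#   Ival.port_str('8080-8081/tcp')   -> ('PORTSTR', '8080-8081/tcp'), length 2
#   Ival.summary([Ival.ip_pfx('10.0.0.0/25'), Ival.ip_pfx('10.0.0.128/25')])
#                                    -> [('IP', '10.0.0.0/24')], adjacent halves are merged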
class Ip4FilterError(Exception):
pass
class Ip4Match(object):
__slots__ = 'rule action name object'.split()
def __init__(self, rule, action, name, obj):
self.rule = rule
self.action = action
self.name = name
self.object = obj
class Ip4Filter(object):
'''
A class for ip session lookup's via src, dst & portstring
- action() -> yields associated action or nomatch value
- match() -> yields match dict or nomatch value
- get() -> match dict {
'rule': Matched rule number
'name' : the name of rule (or '')
'action': the rule's action
'object': the rule's python object (or None)
}
'''
def __init__(self, nomatch=None):
self._src = pt.PyTricia() # pfx -> set(rids) - Ival(src ip pfx)
self._dst = pt.PyTricia() # pfx -> set(rids) - Ival(dst ip pfx)
self._srv = pt.PyTricia() # pfx'-> set(rids) - Ival(dport/protocol)
self._act = {} # rid -> action (lower cased)
self._obj = {} # rid -> any python object
self._tag = {} # rid -> name tag of rule if any, else ''
self._nomatch = nomatch # return value when there is no match at all
def __len__(self):
'the number of rules in the filter'
return len(self._act)
def _lines(self, csv=False):
'return filter as lines for printing'
# {rule_id: {src:[..], dst:[..], srv: [..], name: str, action: str, obj: obj}}
rules = sorted(self.as_dict.items()) # rules dict -> ordered [(k,v)]
fields = 'rule name src dst srv action obj'.split()
fmt = '{!s:<5} {!s:<15} {!s:21} {!s:21} {!s:16} {!s:7} {!s}'
fmt = '{},{},{},{},{},{},{}' if csv else fmt
lines = [fmt.format(*fields)] # csv-header of field names
for rid, rule in rules:
maxl = max(len(rule['src']), len(rule['dst']), len(rule['srv']))
for lnr in range(0, maxl):
rid = rid if lnr == 0 else ''
tag = rule['name'] if lnr == 0 else ''
src = rule['src'][lnr] if lnr < len(rule['src']) else ''
dst = rule['dst'][lnr] if lnr < len(rule['dst']) else ''
prt = rule['srv'][lnr] if lnr < len(rule['srv']) else ''
act = rule['action'] if lnr == 0 else ''
obj = json.dumps(rule['obj']) if lnr == 0 else ''
obj = '' if obj in ['null', '""'] else obj
lines.append(fmt.format(rid, tag, src, dst, prt, act, obj))
return lines
def _set_rid(self, rid, tbl, ival):
'set/add to rule-id on single prefix in specific table'
pfx = str(ival)
try:
if tbl.has_key(pfx): # find the exact prefix
tbl[pfx].add(rid) # add to existing prefix
else:
tbl[pfx] = set([rid]) # it's a new prefix
# propagate rid to more specifics
for kid in tbl.children(pfx):
tbl[kid].add(rid)
# adopt rid's matched by less specific parent (if any)
parent = tbl.parent(pfx)
if parent:
tbl[pfx] = tbl[pfx].union(tbl[parent])
except ValueError as e:
fmt = 'invalid prefix? {}: {}'
print(fmt.format(pfx, repr(e)), file=sys.stderr)
sys.exit(1)
return self
def _add(self, rid, srcs, dsts, srvs, name='', action='', obj=None):
'add Ivals to a new rule or just add to an existing rule'
for ival in Ival.pfx_summary(srcs):
self._set_rid(rid, self._src, ival)
for ival in Ival.pfx_summary(dsts):
self._set_rid(rid, self._dst, ival)
for ival in Ival.portpfx_summary(srvs):
self._set_rid(rid, self._srv, ival)
# name,action are strings; action always lowercase
name = '' if name is None else str(name).strip()
action = '' if action is None else str(action).strip().lower()
# set attributes if not already present
self._act.setdefault(rid, action)
self._obj.setdefault(rid, obj)
self._tag.setdefault(rid, name)
return self
# -- build methods
@classmethod
def compile(cls, fname):
from . import comp
return comp.compile(fname)
def add(self, rid, srcs, dsts, srvs, action='', name='', obj=None):
'add src-list, dst-list and or list of srvs to a new/old rule'
# sanity check arguments
if not isinstance(rid, int):
raise TypeError('expected an int, not {!r}'.format(rid))
for x in [srcs, dsts, srvs]:
if not isinstance(x, (list, tuple)):
raise TypeError('expected a list, not {!r}'.format(x))
srcs = [Ival.ip_pfx(x) for x in srcs]
dsts = [Ival.ip_pfx(x) for x in dsts]
srvs = [Ival.port_str(x) for x in srvs]
return self._add(rid, srcs, dsts, srvs, name, action, obj)
def ruleset(self, src=None, dst=None, srv=None):
'return the set of rule ids matched by src and/or dst and/or service'
# - finds matching rule sets by prefix lookups per item
# returns the minimum rule nr of intersection
try:
rv = [] # collect required matches
if src is not None:
rv.append(self._src[src])
if dst is not None:
rv.append(self._dst[dst])
if srv is not None:
# encode as pfx to index a PyTricia table
pfx = str(Ival.port_str(srv).switch(Ival.IP))
rv.append(self._srv[pfx])
if len(rv):
return set.intersection(*rv)
return set()
except (KeyError, ValueError):
return set()
except TypeError: # invalid value supplied
print('ruleset type error on', src, dst, srv)
return set()
# -- usage methods
def match(self, src, dst, srv=None):
'return a match object or the nomatch value'
rids = self.ruleset(src, dst, srv)
if len(rids) == 0:
return self._nomatch
rid = min(rids)
return Ip4Match(rid,
self._act.get(rid, None),
self._tag.get(rid, ''),
self._obj.get(rid, None))
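# Illustrative usage (added sketch; the rule below is hypothetical):
#   ipf = Ip4Filter(nomatch=None)
#   ipf.add(1, ['10.0.0.0/8'], ['0.0.0.0/0'], ['80/tcp', '443/tcp'],
#           action='permit', name='web-out')
#   m = ipf.match('10.1.2.3', '203.0.113.9', '443/tcp')
#   m.action  # -> 'permit'; match() returns the nomatch value when no rule applies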
# -- to/from CSV
@property
def as_dict(self):
'reconstruct the rules in a dict of dicts'
# {rule nr: {src:[..], dst:[..], srv: [..], action: str, name: str, obj: {..}}}
rules = {}
for pfx in self._src.keys():
for rulenr in self._src[pfx]:
rules.setdefault(rulenr, {}).setdefault('src', []).append(pfx)
try:
for pfx in self._dst.keys():
for rulenr in self._dst[pfx]:
rules[rulenr].setdefault('dst', []).append(pfx)
for pfx in self._srv.keys(): # portstr encoded as a pfx
for rulenr in self._srv[pfx]:
rules[rulenr].setdefault('srv', []).append(pfx)
for rulenr, action in self._act.items():
rules[rulenr]['action'] = action
for rulenr, obj in self._obj.items():
rules[rulenr]['obj'] = obj
for rulenr, name in self._tag.items():
rules[rulenr]['name'] = name
except KeyError as e:
errfmt = 'Error in rule {}:{}'
raise Exception(errfmt.format(rulenr, repr(e)))
for r, rule in rules.items():
# first summarize auto-added more specifics (for set calculations)
rule['src'] = Ival.summary(map(Ival.ip_pfx, rule['src']))
rule['dst'] = Ival.summary(map(Ival.ip_pfx, rule['dst']))
rule['srv'] = Ival.summary(map(Ival.port_pfx, rule['srv']))
# next stringify the ivals
rule['src'] = list(map(str, rule['src']))
rule['dst'] = list(map(str, rule['dst']))
rule['srv'] = list(map(str, rule['srv']))
return rules
def to_csv(self):
'return the ruleset as CSV-formatted text'
rv = []
for line in self._lines(csv=True):
rv.append(line)
return '\n'.join(rv)
def from_csv(self, text):
'read ruleset from csv-text'
inp = io.StringIO(text + '\n')
try:
df = | pd.read_csv(inp, skipinitialspace=True) | pandas.read_csv |
'''
DEMO UTILITIES
'''
#%% Import Python modules
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from matplotlib.patches import Rectangle
from fcgadgets.macgyver import utilities_general as gu
from fcgadgets.cbrunner import cbrun_utilities as cbu
#%%
def GetSingleEnsembleResults(meta):
v0=[]
for iScn in range(meta['Project']['N Scenario']):
v0.append(cbu.LoadSingleOutputFile(meta,iScn,0,0))
return v0
#%%
def CalculateAggregateVariables(meta,v1):
for iScn in range(meta['Project']['N Scenario']):
# Calculate carbon content of dead wood, organic and mineral soil horizons following Shaw et al. (2017)
v1[iScn]['SoilOrgH']=v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['LitterVF']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['LitterS']]
v1[iScn]['SoilMinH']=v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['SoilVF']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['SoilS']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['SoilS']]
v1[iScn]['DeadWood']=v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['SnagStem']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['SnagBranch']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['LitterM']]+ \
v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['LitterF']]
v1[iScn]['C_BiomassAG_Tot']=np.sum(v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['BiomassAboveground']],axis=2)
v1[iScn]['C_BiomassBG_Tot']=np.sum(v1[iScn]['C_Eco_Pools'][:,:,meta['Core']['iEP']['BiomassBelowground']],axis=2)
v1[iScn]['C_BiomassAG_Tot'][np.isnan(v1[iScn]['C_BiomassAG_Tot'])]=0
v1[iScn]['C_Eco_Tot']=np.sum(v1[iScn]['C_Eco_Pools'],axis=2)
v1[iScn]['C_Eco_Tot'][np.isnan(v1[iScn]['C_Eco_Tot'])]=0
#v1[iScn]['Sum']['C_Forest']=v1[iScn]['Sum']['C_Biomass_Tot']+v1[iScn]['Sum']['C_DeadWood_Tot']+v1[iScn]['Sum']['C_Litter_Tot']+v1[iScn]['Sum']['C_Soil_Tot']
return v1
#%% Tabulate responses based on scenario comparison of specified time period
def CompareScenarios(meta,v1,iB,iP,iT):
#--------------------------------------------------------------------------
# Percent response of fluxes
#--------------------------------------------------------------------------
dFluxRel={}
dFluxRel['NPP']=np.round(np.mean((v1[iP]['C_NPP_Tot'][iT]-v1[iB]['C_NPP_Tot'][iT])/v1[iB]['C_NPP_Tot'][iT]*100))
dFluxRel['Gross Growth']=np.round(np.mean((v1[iP]['C_G_Gross_Tot'][iT]-v1[iB]['C_G_Gross_Tot'][iT])/v1[iB]['C_G_Gross_Tot'][iT]*100))
cb0=np.mean(v1[iB]['C_G_Gross'][iT,0,meta['Core']['iEP']['Foliage']])
cp0=np.mean(v1[iP]['C_G_Gross'][iT,0,meta['Core']['iEP']['Foliage']])
dFluxRel['Foliage production']=np.round((cp0-cb0)/cb0*100)
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='RootFine')[0]
cb0=np.mean(v1[iB]['C_G_Gross'][iT,0,ind])
cp0=np.mean(v1[iP]['C_G_Gross'][iT,0,ind])
dFluxRel['Fine root production']=np.round((cp0-cb0)/cb0*100)
dFluxRel['Net Growth']=np.round(np.mean((v1[iP]['C_G_Net_Tot'][iT]-v1[iB]['C_G_Net_Tot'][iT])/v1[iB]['C_G_Net_Tot'][iT]*100))
dFluxRel['Tree Mortality']=np.round(np.mean((v1[iP]['C_M_Reg_Tot'][iT]-v1[iB]['C_M_Reg_Tot'][iT])/v1[iB]['C_M_Reg_Tot'][iT]*100))
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='Foliage')[0]
cb0=np.mean(v1[iB]['C_LF'][iT,0,ind]);cp0=np.mean(v1[iP]['C_LF'][iT,0,ind])
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='Branch')[0]
cb0=cb0+np.mean(v1[iB]['C_LF'][iT,0,ind]);cp0=cp0+np.mean(v1[iP]['C_LF'][iT,0,ind])
dFluxRel['Foliage+Branch turnover']=np.round((cp0-cb0)/cb0*100)
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='RootCoarse')[0]
cb0=np.mean(v1[iB]['C_LF'][iT,0,ind]);cp0=np.mean(v1[iP]['C_LF'][iT,0,ind])
dFluxRel['Coarse root turnover']=np.round((cp0-cb0)/cb0*100)
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='RootFine')[0]
cb0=np.mean(v1[iB]['C_LF'][iT,0,ind]);cp0=np.mean(v1[iP]['C_LF'][iT,0,ind])
dFluxRel['Fine root turnover']=np.round((cp0-cb0)/cb0*100)
dFluxRel['Litterfall']=np.round(np.mean((v1[iP]['C_LF_Tot'][iT]-v1[iB]['C_LF_Tot'][iT])/v1[iB]['C_LF_Tot'][iT]*100))
dFluxRel['RH']=np.round(np.mean((v1[iP]['C_RH_Tot'][iT]-v1[iB]['C_RH_Tot'][iT])/v1[iB]['C_RH_Tot'][iT]*100))
#cb0=np.mean(v1[1]['C_Eco_Pools'][iT,0,28]);cp0=np.mean(v1[0]['C_Eco_Pools'][iT,0,28])
#dFluxRel['Litter Decomp']=np.round(np.mean((cp0-cb0)/cb0*100))
#--------------------------------------------------------------------------
# Percent respoonse of biomass pools
#--------------------------------------------------------------------------
s=['StemMerch','StemNonMerch','Foliage','Branch','Bark','RootCoarse','RootFine']
dr0=np.zeros(len(s));cb=np.zeros(len(s));cp=np.zeros(len(s))
for i in range(len(s)):
ind=np.where(np.array(meta['Core']['Name Pools Eco'])==s[i])[0]
cb0=np.mean(v1[iB]['C_G_Net'][iT,0,ind])
cp0=np.mean(v1[iP]['C_G_Net'][iT,0,ind])
dr0[i]=(cp0-cb0)/cb0*100
cb[i]=cb0; cp[i]=cp0
Stem_b=cb[0]+cb[1]+cb[4]; Stem_p=cp[0]+cp[1]+cp[4]
dPoolRel={}
dPoolRel['Stemwood']=np.round((Stem_p-Stem_b)/Stem_b*100)
dPoolRel['Branch']=np.round(dr0[3])
dPoolRel['Foliage']=np.round(dr0[2])
dPoolRel['Coarse roots']=np.round(dr0[5])
dPoolRel['Fine roots']=np.round(dr0[6])
yP=v1[iP]['C_Biomass_Tot'][iT]
yB=v1[iB]['C_Biomass_Tot'][iT]
dPoolRel['Biomass Total']=np.round(np.mean((yP-yB)/yB*100))
dPoolRel['Dead Wood']=np.round(np.mean((v1[iP]['C_DeadWood_Tot'][iT]-v1[iB]['C_DeadWood_Tot'][iT])/v1[iB]['C_DeadWood_Tot'][iT]*100))
dPoolRel['Litter']=np.round(np.mean(((v1[iP]['C_Litter_Tot'][iT])-(+v1[iB]['C_Litter_Tot'][iT]))/(v1[iB]['C_Litter_Tot'][iT])*100))
dPoolRel['Soil organic horizon']=np.round(np.mean((v1[iP]['SoilOrgH'][iT]-v1[iB]['SoilOrgH'][iT])/v1[iB]['SoilOrgH'][iT]*100))
dPoolRel['Soil mineral Horizon']=np.round(np.mean((v1[iP]['SoilMinH'][iT]-v1[iB]['SoilMinH'][iT])/v1[iB]['SoilMinH'][iT]*100))
dPoolRel['Soil Total']=np.round(np.mean(((v1[iP]['C_Soil_Tot'][iT]+v1[iP]['C_Litter_Tot'][iT])-(v1[iB]['C_Soil_Tot'][iT]+v1[iB]['C_Litter_Tot'][iT]))/(v1[iB]['C_Soil_Tot'][iT]+v1[iB]['C_Litter_Tot'][iT])*100))
#--------------------------------------------------------------------------
# Actual respoonse of fluxes
#--------------------------------------------------------------------------
nam=['StemMerch','StemNonMerch','Foliage','Branch','Bark','RootCoarse','RootFine']
dFluxAct={}
for i in range(len(nam)):
ind=np.where(np.array(meta['Core']['Name Pools Eco'])==nam[i])[0]
dFluxAct[nam[i]]=np.mean(v1[iP]['C_G_Net'][iT,0,ind])-np.mean(v1[iB]['C_G_Net'][iT,0,ind])
dFluxAct['Stem Total']=dFluxAct['StemMerch']+dFluxAct['StemNonMerch']
y_b=v1[iB]['C_NPP_Tot'][iT,0]-v1[iB]['C_RH_Tot'][iT,0]
y_p=v1[iP]['C_NPP_Tot'][iT,0]-v1[iP]['C_RH_Tot'][iT,0]
dFluxAct['NEP']=np.mean(y_p-y_b)
#--------------------------------------------------------------------------
# Stemwood mortality
#--------------------------------------------------------------------------
# Actual and percent response of stemwood mortality for ten years following application
ind=np.where(np.array(meta['Core']['Name Pools Eco'])=='StemMerch')[0]
cb0=np.mean(v1[iB]['C_M_Reg'][iT,0,ind])
cp0=np.mean(v1[iP]['C_M_Reg'][iT,0,ind])
dStemMort={}
dStemMort['Rel']=(cp0-cb0)/cb0*100
dStemMort['Act']=cp0-cb0
#--------------------------------------------------------------------------
# Merch volume
#--------------------------------------------------------------------------
cb0=np.max(v1[iB]['V_StemMerch'][iT,0])
cp0=np.max(v1[iP]['V_StemMerch'][iT,0])
dMerchVolume={}
dMerchVolume['Act']=cp0-cb0;
dMerchVolume['Rel']=(cp0-cb0)/cb0*100
#--------------------------------------------------------------------------
# Nitrogen use efficiency (applied)
#--------------------------------------------------------------------------
N=200 # NUE applied
dcStemG=(np.mean(v1[iP]['C_G_Net'][iT,0,0]-v1[iB]['C_G_Net'][iT,0,0]))*1000
dcStem=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,0]-v1[iB]['C_Eco_Pools'][iT,0,0]))*1000
dcStem=dcStem+(np.mean(v1[iP]['C_Eco_Pools'][iT,0,1]-v1[iB]['C_Eco_Pools'][iT,0,1]))*1000
dcStem=dcStem+(np.mean(v1[iP]['C_Eco_Pools'][iT,0,4]-v1[iB]['C_Eco_Pools'][iT,0,4]))*1000
dNUE_applied={}
dNUE_applied['Stemwood']=np.round(dcStem/N)
dcFoliage=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,2]-v1[iB]['C_Eco_Pools'][iT,0,2]))*1000
dNUE_applied['Foliage']=np.round(dcFoliage/N)
dcBranch=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,3]-v1[iB]['C_Eco_Pools'][iT,0,3]))*1000
dNUE_applied['Branch']=np.round(dcBranch/N)
dcRC=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,5]-v1[iB]['C_Eco_Pools'][iT,0,5]))*1000
dNUE_applied['Coarse root']=np.round(dcRC/N)
dcRF=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,6]-v1[iB]['C_Eco_Pools'][iT,0,6]))*1000
dNUE_applied['Fine root']=np.round(dcRF/N)
dcDW=(np.mean(v1[iP]['C_DeadWood_Tot'][iT]-v1[iB]['C_DeadWood_Tot'][iT]))*1000
dNUE_applied['Dead Wood']=dcDW/N
dcL=(np.mean(v1[iP]['C_Litter_Tot'][iT]-v1[iB]['C_Litter_Tot'][iT]))*1000
dNUE_applied['Litter']=dcL/N
dcS=(np.mean(v1[iP]['C_Soil_Tot'][iT]-v1[iB]['C_Soil_Tot'][iT]))*1000
dNUE_applied['Soil']=dcS/N
dcTot=(np.mean(v1[iP]['C_Eco_Tot'][iT]-v1[iB]['C_Eco_Tot'][iT]))*1000
dNUE_applied['Total']=dcTot/N
dNUE_applied['Biomass']=dNUE_applied['Stemwood']+dNUE_applied['Foliage']+dNUE_applied['Branch']+dNUE_applied['Coarse root']+dNUE_applied['Fine root']
#--------------------------------------------------------------------------
# Nitrogen use efficiency (utilized)
#--------------------------------------------------------------------------
N=40 # NUE utilized
dcStemG=(np.mean(v1[iP]['C_G_Net'][iT,0,0]-v1[iB]['C_G_Net'][iT,0,0]))*1000
dcStem=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,0]-v1[iB]['C_Eco_Pools'][iT,0,0]))*1000
dcStem=dcStem+(np.mean(v1[iP]['C_Eco_Pools'][iT,0,1]-v1[iB]['C_Eco_Pools'][iT,0,1]))*1000
dcStem=dcStem+(np.mean(v1[iP]['C_Eco_Pools'][iT,0,4]-v1[iB]['C_Eco_Pools'][iT,0,4]))*1000
dNUE_utilized={}
dNUE_utilized['Stemwood']=np.round(dcStem/N)
dcFoliage=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,2]-v1[iB]['C_Eco_Pools'][iT,0,2]))*1000
dNUE_utilized['Foliage']=np.round(dcFoliage/N)
dcBranch=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,3]-v1[iB]['C_Eco_Pools'][iT,0,3]))*1000
dNUE_utilized['Branch']=np.round(dcBranch/N)
dcRC=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,5]-v1[iB]['C_Eco_Pools'][iT,0,5]))*1000
dNUE_utilized['Coarse root']=np.round(dcRC/N)
dcRF=(np.mean(v1[iP]['C_Eco_Pools'][iT,0,6]-v1[iB]['C_Eco_Pools'][iT,0,6]))*1000
dNUE_utilized['Fine root']=np.round(dcRF/N)
dcDW=(np.mean(v1[iP]['C_DeadWood_Tot'][iT]-v1[iB]['C_DeadWood_Tot'][iT]))*1000
dNUE_utilized['Dead Wood']=dcDW/N
dcL=(np.mean(v1[iP]['C_Litter_Tot'][iT]-v1[iB]['C_Litter_Tot'][iT]))*1000
dNUE_utilized['Litter']=dcL/N
dcS=(np.mean(v1[iP]['C_Soil_Tot'][iT]-v1[iB]['C_Soil_Tot'][iT]))*1000
dNUE_utilized['Soil']=dcS/N
dcTot=(np.mean(v1[iP]['C_Eco_Tot'][iT]-v1[iB]['C_Eco_Tot'][iT]))*1000
dNUE_utilized['Total']=dcTot/N
dNUE_utilized['Biomass']=dNUE_utilized['Stemwood']+dNUE_utilized['Foliage']+dNUE_utilized['Branch']+dNUE_utilized['Coarse root']+dNUE_utilized['Fine root']
return dFluxRel,dFluxAct,dPoolRel,dStemMort,dMerchVolume,dNUE_applied,dNUE_utilized
#%%
def ExportSummariesByScenario(meta,tv,t_start,t_end,mu1):
it=np.where( (tv>=t_start) & (tv<=t_end) )[0]
for iScn in range(meta['Project']['N Scenario']):
VL=['A','LogSizeEnhancement','V_StemMerch','V_StemMerchToMill',
'C_Biomass_Tot',
'C_DeadWood_Tot',
'C_DumpLandfill_Tot',
'C_Eco_Pools',
'C_Piled_Tot',
'C_G_Gross',
'C_G_Gross_Tot',
'C_G_Net',
'C_G_Net_Tot',
'C_InUse_Tot',
'C_LF',
'C_LF_Tot',
'C_Litter_Tot',
'C_M_Dist',
'C_M_Reg',
'C_M_Reg_Tot',
'C_NPP_Tot',
'C_Pro_Pools',
'C_RH',
'C_RH_Tot',
'C_Soil_Tot',
'C_ToFirewoodDom',
'C_ToFirewoodFor',
'C_ToLogExport',
'C_ToLumber',
'C_ToMDF',
'C_ToMill',
'C_ToMillMerch',
'C_ToMillNonMerch',
'C_ToMillSnagStem',
'C_ToOSB',
'C_ToPaper',
'C_ToPellets',
'C_ToPlywood',
'C_ToPowerFacilityDom',
'C_ToPowerFacilityFor',
'C_ToPowerGrid',
'C_ToSlashpileBurn',
'E_CO2e_LULUCF_NEE',
'E_CO2e_LULUCF_EcoOther',
'E_CO2e_LULUCF_Fire',
'E_CO2e_LULUCF_OpenBurning',
'E_CO2e_LULUCF_Wildfire',
'E_CO2e_LULUCF_HWP',
'E_CO2e_ESC_Comb',
'E_CO2e_ESC_SubBM',
'E_CO2e_ESC_SubE',
'E_CO2e_ET_Comb',
'E_CO2e_IPPU_Comb',
'E_CO2e_AGHGB_WOSub',
'E_CO2e_AGHGB_WOSub_cumu',
'E_CO2e_AGHGB_WSub',
'E_CO2e_AGHGB_WSub_cumu',
'Cost Roads',
'Cost Knockdown',
'Cost Ripping',
'Cost Nutrient Management',
'Cost PAS Deactivation',
'Cost Harvest Felling and Piling',
'Cost Harvest Hauling',
'Cost Harvest Overhead',
'Cost Harvest Residuals',
'Cost Milling',
'Cost Slashpile Burn',
'Cost Planting',
'Cost Survey',
'Cost Silviculture Total',
'Cost Total',
'Cost Total Disc',
'Cost Total Disc_cumu',
'Revenue FirewoodDom',
'Revenue LogExport',
'Revenue Lumber',
'Revenue MDF',
'Revenue OSB',
'Revenue Paper',
'Revenue Pellets',
'Revenue Plywood',
'Revenue PowerFacilityDom',
'Revenue PowerGrid',
'Revenue Gross',
'Revenue Gross Disc',
'Revenue Gross Disc_cumu',
'Revenue Net',
'Revenue Net Disc',
'Revenue Net Disc_cumu',
'Yield FirewoodDom',
'Yield LogExport',
'Yield Lumber',
'Yield MDF',
'Yield OSB',
'Yield Paper',
'Yield Pellets',
'Yield Plywood',
'Yield PowerFacilityDom','Yield PowerGrid']
d={}
for k in VL:
d['Sum ' + k]=np.round(np.sum(mu1[iScn][k][it]),decimals=2)
for k in VL:
d['Mean ' + k]=np.round(np.mean(mu1[iScn][k][it]),decimals=2)
if iScn==0:
df= | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from .constants import PARSING_SCHEME
from ..decorators import float_property_decorator, int_property_decorator
from .nba_utils import _retrieve_all_teams
from .. import utils
from .roster import Roster
from .schedule import Schedule
class Team:
"""
An object containing all of a team's season information.
Finds and parses all team stat information and identifiers, such as rank,
name, and abbreviation, and sets them as properties which can be directly
read from for easy reference.
If calling directly, the team's abbreviation needs to be passed. Otherwise,
the Teams class will handle all arguments.
Parameters
----------
team_name : string (optional)
The name of the team to pull if being called directly.
team_data : string (optional)
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string. Is only used when called directly
from the Teams class.
rank : int (optional)
A team's position in the league based on the number of points they
obtained during the season. Is only used when called directly from the
Teams class.
year : string (optional)
The requested year to pull stats from.
"""
def __init__(self, team_name=None, team_data=None, rank=None, year=None):
self._year = year
self._rank = rank
self._abbreviation = None
self._name = None
self._games_played = None
self._minutes_played = None
self._field_goals = None
self._field_goal_attempts = None
self._field_goal_percentage = None
self._three_point_field_goals = None
self._three_point_field_goal_attempts = None
self._three_point_field_goal_percentage = None
self._two_point_field_goals = None
self._two_point_field_goal_attempts = None
self._two_point_field_goal_percentage = None
self._free_throws = None
self._free_throw_attempts = None
self._free_throw_percentage = None
self._offensive_rebounds = None
self._defensive_rebounds = None
self._total_rebounds = None
self._assists = None
self._steals = None
self._blocks = None
self._turnovers = None
self._personal_fouls = None
self._points = None
self._opp_field_goals = None
self._opp_field_goal_attempts = None
self._opp_field_goal_percentage = None
self._opp_three_point_field_goals = None
self._opp_three_point_field_goal_attempts = None
self._opp_three_point_field_goal_percentage = None
self._opp_two_point_field_goals = None
self._opp_two_point_field_goal_attempts = None
self._opp_two_point_field_goal_percentage = None
self._opp_free_throws = None
self._opp_free_throw_attempts = None
self._opp_free_throw_percentage = None
self._opp_offensive_rebounds = None
self._opp_defensive_rebounds = None
self._opp_total_rebounds = None
self._opp_assists = None
self._opp_steals = None
self._opp_blocks = None
self._opp_turnovers = None
self._opp_personal_fouls = None
self._opp_points = None
if team_name:
team_data = self._retrieve_team_data(year, team_name)
self._parse_team_data(team_data)
def __str__(self):
"""
Return the string representation of the class.
"""
return f'{self.name} ({self.abbreviation}) - {self._year}'
def __repr__(self):
"""
Return the string representation of the class.
"""
return self.__str__()
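# Illustrative usage (added sketch; the abbreviation, season and properties shown are assumptions):
#   det = Team('DET', year='2020')
#   det.points, det.opp_points   # season totals parsed from the team tables
#   det.dataframe                # one-row pandas DataFrame indexed by 'DET'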
def _retrieve_team_data(self, year, team_name):
"""
Pull all stats for a specific team.
By first retrieving a dictionary containing all information for all
teams in the league, only select the desired team for a specific year
and return only their relevant results.
Parameters
----------
year : string
A ``string`` of the requested year to pull stats from.
team_name : string
A ``string`` of the team's 3-letter abbreviation, such as 'HOU' for
the Houston Rockets.
Returns
-------
PyQuery object
Returns a PyQuery object containing all stats and information for
the specified team.
"""
team_data_dict, year = _retrieve_all_teams(year)
self._year = year
team_data = team_data_dict[team_name]['data']
self._rank = team_data_dict[team_name]['rank']
return team_data
def _parse_team_data(self, team_data):
"""
Parses a value for every attribute.
This function looks through every attribute with the exception of
'_rank' and retrieves the value according to the parsing scheme and
index of the attribute from the passed HTML data. Once the value is
retrieved, the attribute's value is updated with the returned result.
Note that this method is called directly once Team is invoked and does
not need to be called manually.
Parameters
----------
team_data : string
A string containing all of the rows of stats for a given team. If
multiple tables are being referenced, this will be comprised of
multiple rows in a single string.
"""
for field in self.__dict__:
# The rank attribute is passed directly to the class during
# instantiation.
if field == '_rank' or \
field == '_year':
continue
value = utils._parse_field(PARSING_SCHEME,
team_data,
str(field)[1:])
setattr(self, field, value)
@property
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string abbreviation of the
team, such as 'DET'.
"""
fields_to_include = {
'abbreviation': self.abbreviation,
'assists': self.assists,
'blocks': self.blocks,
'defensive_rebounds': self.defensive_rebounds,
'field_goal_attempts': self.field_goal_attempts,
'field_goal_percentage': self.field_goal_percentage,
'field_goals': self.field_goals,
'free_throw_attempts': self.free_throw_attempts,
'free_throw_percentage': self.free_throw_percentage,
'free_throws': self.free_throws,
'games_played': self.games_played,
'minutes_played': self.minutes_played,
'name': self.name,
'offensive_rebounds': self.offensive_rebounds,
'opp_assists': self.opp_assists,
'opp_blocks': self.opp_blocks,
'opp_defensive_rebounds': self.opp_defensive_rebounds,
'opp_field_goal_attempts': self.opp_field_goal_attempts,
'opp_field_goal_percentage': self.opp_field_goal_percentage,
'opp_field_goals': self.opp_field_goals,
'opp_free_throw_attempts': self.opp_free_throw_attempts,
'opp_free_throw_percentage': self.opp_free_throw_percentage,
'opp_free_throws': self.opp_free_throws,
'opp_offensive_rebounds': self.opp_offensive_rebounds,
'opp_personal_fouls': self.opp_personal_fouls,
'opp_points': self.opp_points,
'opp_steals': self.opp_steals,
'opp_three_point_field_goal_attempts':
self.opp_three_point_field_goal_attempts,
'opp_three_point_field_goal_percentage':
self.opp_three_point_field_goal_percentage,
'opp_three_point_field_goals': self.opp_three_point_field_goals,
'opp_total_rebounds': self.opp_total_rebounds,
'opp_turnovers': self.opp_turnovers,
'opp_two_point_field_goal_attempts':
self.opp_two_point_field_goal_attempts,
'opp_two_point_field_goal_percentage':
self.opp_two_point_field_goal_percentage,
'opp_two_point_field_goals': self.opp_two_point_field_goals,
'personal_fouls': self.personal_fouls,
'points': self.points,
'rank': self.rank,
'steals': self.steals,
'three_point_field_goal_attempts':
self.three_point_field_goal_attempts,
'three_point_field_goal_percentage':
self.three_point_field_goal_percentage,
'three_point_field_goals': self.three_point_field_goals,
'total_rebounds': self.total_rebounds,
'turnovers': self.turnovers,
'two_point_field_goal_attempts':
self.two_point_field_goal_attempts,
'two_point_field_goal_percentage':
self.two_point_field_goal_percentage,
'two_point_field_goals': self.two_point_field_goals
}
return | pd.DataFrame([fields_to_include], index=[self._abbreviation]) | pandas.DataFrame |
import pandas as pd
import pprint
import re
ISO8601YMD = re.compile(r'\d{4}-\d{2}-\d{2}T')
NY = 'America/New_York'
class Entity(object):
'''This helper class provides property access (the "dot notation")
to the json object, backed by the original object stored in the _raw
field.
'''
def __init__(self, raw):
self._raw = raw
def __getattr__(self, key):
if key in self._raw:
val = self._raw[key]
if (isinstance(val, str) and
(key.endswith('_at') or
key.endswith('_timestamp') or
key.endswith('_time')) and
ISO8601YMD.match(val)):
return pd.Timestamp(val)
else:
return val
return super().__getattribute__(key)
def __repr__(self):
return '{name}({raw})'.format(
name=self.__class__.__name__,
raw=pprint.pformat(self._raw, indent=4),
)
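# Illustrative sketch (the payload below is made up, not taken from the Alpaca API docs):
#   e = Entity({'id': 'abc123', 'created_at': '2021-03-01T14:30:00Z'})
#   e.id          -> 'abc123'
#   e.created_at  -> pd.Timestamp('2021-03-01 14:30:00+00:00')  # *_at/_time/_timestamp keys are parsed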
class Account(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/account/
"""
pass
class AccountConfigurations(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/account-configuration/
"""
pass
class Asset(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/assets/#asset-entity
"""
pass
class Order(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/orders/#order-entity
"""
def __init__(self, raw):
super().__init__(raw)
try:
self.legs = [Order(o) for o in self.legs]
except Exception:
# No order legs existed
pass
class Position(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/positions/#position-entity
"""
pass
class AccountActivity(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/account-activities/
"""
pass
class Bar(Entity):
"""
Entity properties:
https://alpaca.markets/docs/api-documentation/api-v2/market-data/bars/
#bars-entity
"""
def __getattr__(self, key):
if key == 't':
val = self._raw[key[0]]
return pd.Timestamp(val, unit='s', tz=NY)
return super().__getattr__(key)
class Bars(list):
def __init__(self, raw):
super().__init__([Bar(o) for o in raw])
self._raw = raw
@property
def df(self):
if not hasattr(self, '_df'):
df = pd.DataFrame(
self._raw, columns=('t', 'o', 'h', 'l', 'c', 'v'),
)
alias = {
't': 'time',
'o': 'open',
'h': 'high',
'l': 'low',
'c': 'close',
'v': 'volume',
}
df.columns = [alias[c] for c in df.columns]
df.set_index('time', inplace=True)
if not df.empty:
df.index = pd.to_datetime(
(df.index * 1e9).astype('int64'), utc=True,
).tz_convert(NY)
else:
df.index = pd.to_datetime(
df.index, utc=True
)
self._df = df
return self._df
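# Illustrative sketch (one made-up raw bar; epoch seconds are what Bar.t above expects):
#   bars = Bars([{'t': 1614606600, 'o': 120.0, 'h': 121.5, 'l': 119.8, 'c': 121.0, 'v': 100000}])
#   bars[0].t   -> pd.Timestamp localized to America/New_York
#   bars.df     -> OHLCV DataFrame indexed by NY-localized timestamps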
class BarSet(dict):
def __init__(self, raw):
for symbol in raw:
self[symbol] = Bars(raw[symbol])
self._raw = raw
@property
def df(self):
'''## Experimental '''
if not hasattr(self, '_df'):
dfs = []
for symbol, bars in self.items():
df = bars.df.copy()
df.columns = pd.MultiIndex.from_product(
[[symbol, ], df.columns])
dfs.append(df)
if len(dfs) == 0:
self._df = | pd.DataFrame() | pandas.DataFrame |
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import numpy as np
import pandas as pd
import time
import warnings
warnings.filterwarnings("ignore")
import glob
import matplotlib.pyplot as plt
import math
import pickle5 as pickle
def plot_each_app(df, dates, predict, y_test, title, look_back=0):
num_date = len(dates)
fig, axes = plt.subplots(num_date, 1, figsize=(24, num_date * 5))
plt.suptitle(title, fontsize='25')
fig.tight_layout()
fig.subplots_adjust(top=0.95)
for i in range(num_date):
if i == 0: l = 0
ind = df.loc[dates[i]].index[look_back:]
axes.flat[i].plot(ind, y_test[l:l + len(ind)], color='blue', alpha=0.6, label='True value')
axes.flat[i].plot(ind, predict[l:l + len(ind)], color='red', alpha=0.6, label='Predicted value')
axes.flat[i].legend()
l = l + len(ind)
plt.show()
def rmse_loss(y_predict, y):
return math.sqrt(np.mean(np.square(y_predict - y)))
def mae_loss(y_predict, y):
return np.mean(np.abs(y_predict - y))
def build_fc_model():
fc_model = Sequential()
fc_model.add(tf.keras.layers.Dense(2, activation='relu'))
fc_model.add(tf.keras.layers.Dense(256, activation='relu'))
fc_model.add(tf.keras.layers.Dense(512))
fc_model.add(tf.keras.layers.Dense(1024))
fc_model.add(tf.keras.layers.Dense(1))
fc_model.build((214816, 2))
fc_model.summary()
return fc_model
def choose_appliance(appliance):
appliance_dict = {}
for i in range(1, 7):
f = f'./pkl_files/house_{i}.pkl'
with open(f, 'rb') as pickle_file:
data = pickle.load(pickle_file)
for col in data.columns:
if appliance in col:
appliance_dict[i] = col
return appliance_dict
def train_test_split(appliance_dict, test_house):
X_train = []
Y_train = []
X_test = []
Y_test = []
for house, app in appliance_dict.items():
f = f'./pkl_files/house_{house}.pkl'
with open(f, 'rb') as pickle_file:
data = pickle.load(pickle_file)
if house == 1 or house == 2 or house == 3 or house == 4:
train_mains = data[['1_mains', '2_mains']].values
train_app = data[app].values
X_train.extend(train_mains)
Y_train.extend(train_app)
elif house == test_house:
test_mains = data[['1_mains', '2_mains']].values
test_app = data[app].values
X_test.extend(test_mains)
Y_test.extend(test_app)
f = f'./pkl_files/house_{test_house}.pkl'
with open(f, 'rb') as pickle_file:
data = pickle.load(pickle_file)
date_indexes = data.index.values
dates = [str(time)[:10] for time in date_indexes]
dates = sorted(list(set(dates)))
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
Y_train = np.asarray(Y_train)
Y_test = np.asarray(Y_test)
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
return X_train, X_test, Y_train, Y_test, data, dates, date_indexes
def train_model(nn_model, X_train, X_test, Y_train, Y_test, epochs, batch_size, appliance, house):
adam = Adam(learning_rate=1e-5)
nn_model.compile(loss='mean_squared_error', optimizer=adam)
start = time.time()
checkpointer = ModelCheckpoint(filepath=f'./nn_house_{house}_{appliance}.hdf5', verbose=0, save_best_only=True)
hist_nn = nn_model.fit(X_train, Y_train,
batch_size=batch_size, verbose=1, epochs=epochs,
validation_split=0.33, callbacks=[checkpointer])
print('Finish training time: ', time.time() - start)
nn_model = load_model(f'nn_house_{house}_{appliance}.hdf5')
prediction = nn_model.predict(X_test).reshape(-1)
RMSE = rmse_loss(prediction, Y_test)
MAE = mae_loss(prediction, Y_test)
train_loss = hist_nn.history['loss']
val_loss = hist_nn.history['val_loss']
return prediction, RMSE, MAE
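# Added illustrative end-to-end sketch; the appliance name, test house and
# hyper-parameters below are assumptions, not values from the original script.
def example_pipeline():
    app_dict = choose_appliance('refrigerator')
    X_train, X_test, Y_train, Y_test, data, dates, date_indexes = train_test_split(app_dict, test_house=5)
    model = build_fc_model()
    prediction, rmse, mae = train_model(model, X_train, X_test, Y_train, Y_test,
                                        epochs=10, batch_size=512,
                                        appliance='refrigerator', house=5)
    plot_each_app(data, dates[:3], prediction, Y_test, title='Refrigerator disaggregation')
    return rmse, mae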
def pckl_results(date_indexes, prediction, appliance, house):
results_dict = {'timestamp': list(date_indexes), 'prediction': list(prediction)}
results_df = | pd.DataFrame(data=results_dict) | pandas.DataFrame |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
os.chdir('/home/cuoco/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine=pd.read_csv('../features/procedure_vaccine.csv.gz')
procedure_vagi=pd.read_csv('../features/procedure_vagi.csv.gz')
procedure_plan_type=pd.read_csv('../features/procedure_plan_type.csv.gz')
rx_payment=pd.read_csv('../features/rx_payment.csv.gz')
train_pract_screen_ratio=pd.read_csv('../features/train_pract_screen_ratio.csv.gz')
test_pract_screen_ratio=pd.read_csv('../features/test_pract_screen_ratio.csv.gz')
visits=pd.read_csv('../features/visits.csv.gz')
train=pd.merge(train,surgical, on='patient_id',how='left')
test=pd.merge(test,surgical, on='patient_id',how='left')
print('after merging surgical')
print(train.shape,test.shape)
train=pd.merge(train,diagnosis, on='patient_id',how='left')
test=pd.merge(test,diagnosis, on='patient_id',how='left')
print('after merging diagnosis')
print(train.shape,test.shape)
#train=pd.merge(train,procedure_cervi, on='patient_id',how='left')
#test=pd.merge(test,procedure_cervi, on='patient_id',how='left')
#train=pd.merge(train,procedure_hpv, on='patient_id',how='left')
#test=pd.merge(test,procedure_hpv, on='patient_id',how='left')
#train=pd.merge(train,procedure_vaccine, on='patient_id',how='left')
#test=pd.merge(test,procedure_vaccine, on='patient_id',how='left')
train=pd.merge(train,procedure_vagi, on='patient_id',how='left')
test=pd.merge(test,procedure_vagi, on='patient_id',how='left')
train=pd.merge(train,procedure_plan_type, on='patient_id',how='left')
test=pd.merge(test,procedure_plan_type, on='patient_id',how='left')
print('after merging procedure')
print(train.shape,test.shape)
train=pd.merge(train,rx_payment, on='patient_id',how='left')
test=pd.merge(test,rx_payment, on='patient_id',how='left')
print('after merging rx_payment')
print(train.shape,test.shape)
train=pd.merge(train,train_pract_screen_ratio, on='patient_id',how='left')
test=pd.merge(test,test_pract_screen_ratio, on='patient_id',how='left')
print('after merging pract_scree_ratio')
print(train.shape,test.shape)
train=pd.merge(train,visits, on='patient_id',how='left')
test= | pd.merge(test,visits, on='patient_id',how='left') | pandas.merge |
# general
import logging
import os
import sys
import time
import configparser
import math
import scipy.optimize as opt
from scipy.spatial import ConvexHull
from copy import deepcopy
from itertools import combinations
# graph
import networkx as nx
import geonetworkx as gnx
# data
import pandas as pd
# optimization
import julia
# config
from .. import config
from ...exception_utils import DHCOptimizerException
from .NLP.data_regressions import *
from PyQt5.QtWidgets import QMessageBox
from ....python_julia_interface import JuliaQgisInterface
class ADNetworkOptimizer:
"""Network optimizer in automatic design mode.
Given a 'networkx' network having costs, capacities, demand and production attributes, the optimize method tries to
find the minimal fixed-cost network supplying the given objective.
"""
def __init__(self, optimization_graph=None, **kwargs):
self.logger = logging.getLogger(__name__)
self.optimization_graph = optimization_graph
self.network_objective = kwargs.get('network_objective', None)
self.solution_graph = None
self.connected = False
self.connected_buildings = None
self.old_capacity = {}
self.conf = {}
os.makedirs(os.path.join(os.environ["LOCALAPPDATA"], "QGIS\\QGIS3\\planheat_data\\tmp"), exist_ok=True)
self.solver_log_file = os.path.join(os.environ["LOCALAPPDATA"], "QGIS\\QGIS3\\planheat_data\\tmp\\output.log")
self.energy = None
self.consumption_file_path = os.path.join(os.path.dirname(__file__),'NLP','consumption_data.csv')
self.conf_path = os.path.join(os.path.dirname(__file__),'NLP','conf.ini')
def check_is_ready(self):
"""Check that all necessary inputs have been set."""
self.logger.info("Checking optimization inputs for automatic design.")
if self.optimization_graph is None:
raise RuntimeError("The optimization graph needs to be defined in order to optimize the network.")
if self.network_objective is None:
raise RuntimeError("A network objective has to be set (in MW).")
def check_infeasibility(self, graph, objective):
self.logger.info("Checking infeasibility for automatic design.")
ccs = list(nx.connected_components(graph.to_undirected()))
productions = nx.get_node_attributes(graph, config.SUPPLY_POWER_CAPACITY_KEY)
heat_demand = nx.get_node_attributes(graph, config.BUILDING_CONSUMPTION_KEY)
total_residual_connections = 0.0
# print([e for e in graph.edges()])
for cc in ccs:
#print('!!', cc)
residual_production = sum(productions[n] for n in cc if n in productions)
residual_consumption = sum(heat_demand[n] for n in cc if n in heat_demand)
residual_maximum_connection = min(residual_production, residual_consumption)
total_residual_connections += residual_maximum_connection
if total_residual_connections < objective - 1e-8:
raise DHCOptimizerException("Problem is inconsistent: total production capacity is lower than coverage"
" objective (taking into account connected components): "
"reachable consumption is %f"
" and total objective is %f" % (total_residual_connections,
objective))
def optimize(self):
"""Run the optimization with the selected method
:return: flows : dict. Flow on each edge.
:return: obj_val: float. Solution cost.
"""
self.logger.info("Solving with Dynamic Slope Scaling Procedure in Julia :")
optimization_start = time.time()
# 1. Preprocess for old network graph
if self.old_network_graph is not None:
# DSSP on old network
old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5
try:
self.check_infeasibility(self.old_network_graph, old_network_obj)
except DHCOptimizerException as e:
e.data = "Invalid existing network: " + e.data
raise e
flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())
self.logger.info("Optimization phase time: %.2fs" % (time.time() - optimization_start))
solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)
if self.modify_old_network:
# Add max capacity on old edges
self.old_capacity = deepcopy(flows)
                old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())
for key in flows:
if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:
self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]
# Add Imaginary edges
for edge in self.old_capacity:
if self.optimization_graph.has_edge(*edge):
# add nodes
if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):
self.optimization_graph.add_node(config.IM_PREFIX+edge[0])
self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \
self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]
if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):
self.optimization_graph.add_node(config.IM_PREFIX+edge[1])
self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \
self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]
# add edges
if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):
self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])
if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):
self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])
if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):
self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])
# put cost
self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \
self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]
self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5
self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5
self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5
else:
# if we don't modify the old network, we have to change the capacity of the supplies
already_consummed = {}
for edge in solution_old_graph.edges():
if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:
already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \
solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]
for source in already_consummed:
if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]
self.network_objective -= already_consummed[source]
else:
self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0
# Remove edges from old network
edges_to_remove = set()
for e in self.optimization_graph.edges():
if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):
edges_to_remove.add(e)
self.optimization_graph.remove_edges_from(edges_to_remove)
# Remove isolated buildings of optimization graph
isolated_to_remove = set()
for e in self.old_network_graph.edges():
if e[0] in self.old_network_graph.nodes() and \
self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:
isolated_to_remove.add(e)
self.optimization_graph.remove_edges_from(isolated_to_remove)
# Remove buildings from old network
for n, data in self.old_network_graph.nodes(data=True):
if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:
self.optimization_graph.remove_node(n)
# Re-link sources
sources = set()
for n, data in self.optimization_graph.nodes(data=True):
if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:
sources.add(n)
source_graph = self.optimization_graph.subgraph(sources).copy()
self.optimization_graph.remove_nodes_from(sources)
gnx.remove_isolates(self.optimization_graph)
node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE
gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)
# fill missing information
gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)
gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)
gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)
for e in self.optimization_graph.edges(keys=True):
self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \
self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)
# 2. Process the DSSP on optimization graph
self.check_is_ready()
self.check_infeasibility(self.optimization_graph, self.network_objective)
if self.old_network_graph is not None and self.modify_old_network:
old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())
else:
old_buildings = set()
flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))
self.logger.info("Optimization phase time: %.2fs" % (time.time() - optimization_start))
self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)
# 3. Postprocess for old network graph
if self.old_network_graph is not None:
if self.modify_old_network:
# Put the right supply capacity and cost
for edge in self.old_capacity:
if self.solution_graph.has_edge(edge[0],edge[1]):
self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \
self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]
# Remove imaginary edges
imaginary_nodes_to_remove = set()
nodes_to_relabel = {}
for edge in self.solution_graph.edges():
if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):
real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]
                        self.old_capacity[(real_edge[0], real_edge[1], 0)] = math.inf
                        self.old_capacity[(real_edge[1], real_edge[0], 0)] = math.inf
if not self.solution_graph.has_edge(*real_edge):
for i in range(2):
nodes_to_relabel[edge[i]] = real_edge[i]
else:
self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \
self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)
imaginary_nodes_to_remove.add(edge[0])
imaginary_nodes_to_remove.add(edge[1])
elif str(edge[0]).startswith(config.IM_PREFIX):
imaginary_nodes_to_remove.add(edge[0])
elif str(edge[1]).startswith(config.IM_PREFIX):
imaginary_nodes_to_remove.add(edge[1])
nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)
self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))
for node in nodes_to_relabel.values():
if self.solution_graph.has_edge(node, node):
self.solution_graph.remove_edge(node, node)
else:
for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):
self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)
self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)
return flows, obj_val
def optimize_NLP(self, only_preprocess=False):
''' solve the NLP problem of the optimal size of the pipes knowing the route i.e. the selected streets
1. Load parameters
2. Preprocess to satisfy the heat demand at any time of the year
3. Solve the problem with Ipopt or Knitro on the graph
4. Add velocity, pressure and costs information on solution graph's edges if the status of the optimizer is "Optimal" '''
self.logger.info("Start pipe size optimization")
# 1. Load parameters
self.load_conf()
consumption_data, capacity_data = self.get_consumption_and_capacities_from_csv(self.solution_graph, self.consumption_file_path)
# 2. Preprocess NLP
if self.old_network_graph is not None and self.modify_old_network:
max_capacity = self.old_capacity
else:
max_capacity = {}
lb_flow = self.preprocess(self.solution_graph, consumption_data, capacity_data, max_capacity)
# Conversion MW flow to diameter
lb_diam = {}
a_vel, b_vel = self.conf["A_MAX_VELOCITY"], self.conf["B_MAX_VELOCITY"]
for edge in lb_flow:
mass_flow = self.convert_power_to_mass_flow(lb_flow[edge])
            # mass_flow = RHO * v * pi*(D/2)**2 * 1e-6 with v = A*D + B and D in mm,
            # which rearranges to f(D) = a*D**3 + b*D**2 - 4*mass_flow*1e6/(pi*RHO) = 0,
            # solved below by bisection on D in (0, 500] mm
            f = lambda x: x**3 * a_vel + x**2 * b_vel - 4 * mass_flow / math.pi / self.conf["RHO"] * 1e6
a, b = 0, 500
while b-a>0.1:
c = (a+b)/2
if f(a)*f(c) <= 0 : b = c
else: a = c
lb_diam[edge] = a/10 # (in cm)
if only_preprocess:
self.fill_edges_with_NLP({'Diameter': lb_diam})
self.logger.info("Pipe size optimization completed")
return True
# 3.
peak_consumption = self.get_annual_peak_consumption(consumption_data)
NLP_Output, status = self.optimize_pipe_size(self.solution_graph, lb_diam, peak_consumption, max_capacity)
if status == "Optimal": # "Optimal", "Unbounded", "Infeasible", "UserLimit", "Error" or "NotSolved"
self.logger.info("Pipe size optimization completed")
self.logger.info("Collecting the NLP solution" )
self.fill_edges_with_NLP(NLP_Output)
return True
else:
self.logger.warning("NLP optimization exits with status: %s" % str(status))
self.fill_edges_with_NLP({'Diameter': lb_diam})
return False
def build_solution_graph(self, graph, flows, connecting_graph=False):
"""Create the solution with the optimization results. Keep only the edges with non negative flow."""
self.logger.info("Building solution graph")
self.clean_flow_cycles(flows)
edges_to_keep = [e for e, flow in flows.items() if flow > 0]
solution_graph_mdg = graph.edge_subgraph(edges_to_keep)
if connecting_graph:
# We add the edges to connect
edges_to_keep = edges_to_keep + self.connecting_graph(solution_graph_mdg)
# We rebuild the graph
solution_graph_mdg = graph.edge_subgraph(edges_to_keep)
# We add the flow attribute
for e in edges_to_keep:
solution_graph_mdg.edges[(e[0], e[1], 0)][config.SOLUTION_POWER_FLOW_KEY] = flows[e]
# We convert it in GeoDiGraph
solution_graph_mdg.crs = self.optimization_graph.crs
solution_graph = gnx.GeoDiGraph(solution_graph_mdg, crs=solution_graph_mdg.crs)
gnx.remove_isolates(solution_graph)
solution_graph.name = "solution_graph"
return solution_graph
def preprocess(self, solution_graph, consumption_data, capacity_data, max_capacity={}):
''' calculate the lower bound for flow.
1. Simplify the graph until having no end nodes
2. If there is no crossing nodes, the preprocess is ended.
3. 1) we calculate for each edge the surplus and the need
2) we deduced the lower bounds'''
self.logger.info('start preprocess')
lb_flow = {}
# Graph copy to directed graph
G = nx.DiGraph(nx.Graph(solution_graph))
# check dimensions
        assert len(consumption_data) == len(capacity_data), "Dimensions of consumption_data and capacity_data must match"
start_time = time.time()
# 1. simplify the graph and calculate the lower bounds
end_nodes = set([x for x in G.nodes() \
if len(set(G.predecessors(x)).union(set(G.successors(x))))==1\
                         and G.nodes[x].get(config.NODE_TYPE_KEY, config.SUPPLY_NODE_TYPE) != config.SUPPLY_NODE_TYPE])
finished = self.calculate_consumption_predecessors_nodes(G, consumption_data, capacity_data, lb_flow, end_nodes)
if finished:
return lb_flow
# 3.
        source_nodes = set([n for n in G.nodes() if G.nodes[n].get(config.NODE_TYPE_KEY, None) == config.SUPPLY_NODE_TYPE])
needs_data, surplus_data = {}, {}
for node in source_nodes:
for edge in G.out_edges(node):
self.find_needs(G, edge, needs_data, consumption_data, source_nodes, max_capacity)
for edge in G.edges():
self.find_surplus(G, edge, surplus_data, consumption_data, capacity_data, set(), max_capacity)
for edge in set(surplus_data.keys()).intersection(set(needs_data.keys())):
if type(surplus_data[edge]) != int and type(needs_data[edge]) != int:
lb_flow[edge] = max(lb_flow.get(edge,0), max(pd.concat([surplus_data[edge],needs_data[edge]], axis=1).min(axis=1)))
lb_flow[(edge[1], edge[0], *edge[2:])] = max( lb_flow.get((edge[1], edge[0], *edge[2:]),0), lb_flow[edge] )
self.logger.info('end preprocess in ' + str(time.time() - start_time) + ' s')
return lb_flow
def connecting_graph(self, solution_graph, weight='cost', ignore_sources=False):
"""Return the list of edges to add to have a connected graph
1. find the groups of sources isolated from each others
2. calculate for each group of sources the convex hull
3. find the smallest path between each pair of groups
The key idea is to add to the graph edges of weight 0 between all nodes on the convex hull
        and then run a Dijkstra shortest path between one random node of group1 and one random node of group2.
        To recover the "real" path, we just have to remove the 0-weight edges
4. Do a minimum spanning tree with the aggregated graph (nodes are the different groups and edges are the path found just before)
"""
debut = time.time()
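        # Illustrative sketch of step 4 only (hypothetical group names and path
        # lengths): the aggregated graph has one node per isolated source group
        # and one weighted edge per candidate connecting path; the minimum
        # spanning tree then selects which paths are actually added.
        #
        #   G = nx.Graph()
        #   G.add_edge("S1", "S2", weight=120.0)
        #   G.add_edge("S2", "S3", weight=80.0)
        #   G.add_edge("S1", "S3", weight=250.0)
        #   T = nx.minimum_spanning_tree(G)   # keeps (S1, S2) and (S2, S3)
        #
        # Only the underlying street edges of the kept paths are returned.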
self.logger.info('start connecting graph')
# we need an undirected graph
undirected_solution_graph = solution_graph.to_undirected()
if self.old_network_graph is not None and self.modify_old_network:
undirected_solution_graph = nx.compose(nx.MultiGraph(self.old_network_graph), undirected_solution_graph)
# if already connected
if nx.is_connected(undirected_solution_graph) == True:
self.logger.info("the solution graph is already connected")
return []
# Computing the minimum sources in each component and all junction nodes in the solution graph
nodetype = nx.get_node_attributes(undirected_solution_graph, config.NODE_TYPE_KEY)
list_sources = [node for node in nodetype if nodetype[node] == config.SUPPLY_NODE_TYPE]
# 1. Search of all connected subgraphs
if not ignore_sources:
reduced_list_sources = []
while len(list_sources) > 0:
source, is_isolated = list_sources.pop(0), True
for i in range(len(list_sources)):
is_isolated = is_isolated and not (nx.has_path(undirected_solution_graph, source, list_sources[i]))
if is_isolated:
reduced_list_sources.append(source)
else:
reduced_list_sources = [list(n)[0] for n in nx.connected_components(undirected_solution_graph)]
# 2. Creation of all convex hulls for each source in reduced_list_sources
hulls = {}
for source in reduced_list_sources:
coord_compo = {}
nodes_connecting_source = nx.node_connected_component(undirected_solution_graph, source)
for node in nodes_connecting_source:
xy = tuple(self.optimization_graph.get_node_coordinates(node))
coord_compo[xy] = node
if len(coord_compo) > 2:
convexhull = ConvexHull(list(coord_compo.keys())).points
else:
convexhull = list(coord_compo.keys())
hulls[source] = [coord_compo[tuple(coord)] for coord in convexhull]
# 3. Create list of possible list_edges_to_add
list_edges_to_add = {} # list of {(S1, S2):(length_of_SP, edges_to_add)}
for S1, S2 in combinations(reduced_list_sources, 2):
# change weight of edges
for i in range(len(hulls[S1])-1):
u,v = hulls[S1][i], hulls[S1][i+1]
self.optimization_graph.add_edge(u,v,key=-1,weight=0)
self.optimization_graph.add_edge(hulls[S1][-1],hulls[S1][0],key=-1,weight=0)
for i in range(len(hulls[S2])-1):
u,v = hulls[S2][i], hulls[S2][i+1]
self.optimization_graph.add_edge(u,v,key=-1,weight=0)
self.optimization_graph.add_edge(hulls[S2][-1],hulls[S2][0],key=-1,weight=0)
# find the shortest path
source, target = hulls[S1][0], hulls[S2][0] # it's a choice to take 0, but no matter
try:
length, path = nx.single_source_dijkstra(self.optimization_graph, source, target=target, weight=weight)
except nx.NetworkXNoPath:
self.logger.info("Source " + str(S1) + " and source " + str(S2) + " can't be connected")
return []
list_weights = nx.get_edge_attributes(self.optimization_graph, weight)
# edges to add to connect S1 and S2
edges_to_add = []
for i in range(len(path) - 1):
u, v = path[i], path[i + 1]
# if the edge between (u,v) is not artificial, we add it
if list_weights.get((u, v, -1), None) != 0 and list_weights.get((u, v, 0), None) is not None:
edges_to_add.append((u, v, 0))
if list_weights.get((v, u, -1), None) != 0 and list_weights.get((v, u, 0), None) is not None:
edges_to_add.append((v, u, 0))
list_edges_to_add[(S1, S2)] = (length, edges_to_add)
# change weight of edges
for i in range(len(hulls[S1])-1):
u,v = hulls[S1][i], hulls[S1][i+1]
self.optimization_graph.remove_edge(u,v,key=-1)
self.optimization_graph.remove_edge(hulls[S1][-1],hulls[S1][0],key=-1)
for i in range(len(hulls[S2])-1):
u,v = hulls[S2][i], hulls[S2][i+1]
self.optimization_graph.remove_edge(u,v,key=-1)
self.optimization_graph.remove_edge(hulls[S2][-1],hulls[S2][0],key=-1)
# 4. choice of best edges to add (Kruskal)
G = nx.Graph()
for (S1, S2) in list_edges_to_add:
(length, _) = list_edges_to_add[(S1, S2)]
if not G.has_node(S1):
G.add_node(S1)
if not G.has_node(S2):
G.add_node(S2)
G.add_edge(S1, S2, weight=length)
reduced_list_edges_to_add = set()
T = nx.minimum_spanning_tree(G)
for u, v in T.edges:
if (u, v) in list_edges_to_add:
reduced_list_edges_to_add = reduced_list_edges_to_add.union(set(list_edges_to_add[(u, v)][1]))
if (v, u) in list_edges_to_add:
reduced_list_edges_to_add = reduced_list_edges_to_add.union(set(list_edges_to_add[(v, u)][1]))
self.logger.info('end connecting graph in ' + str(time.time() - debut) + ' s')
return list(reduced_list_edges_to_add)
@staticmethod
    def clean_flow_cycles(flows: dict):
        """Remove the sub-optimal flow cycles allowed by flow conservation. The flows dictionary is modified
        in place."""
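        # Worked example (hypothetical edge keys): with
        #   flows = {("a", "b", 0): 5.0, ("b", "a", 0): 3.0}
        # the shared cycle flow min(5, 3) = 3 is subtracted from both directions,
        # leaving {("a", "b", 0): 2.0, ("b", "a", 0): 0.0}.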
for e, flow in flows.items():
if flow > 0:
reversed_e = (e[1], e[0], *e[2:])
if reversed_e in flows and flows[reversed_e] > 0:
reversed_flow = flows[reversed_e]
cycle_flow = min(flow, reversed_flow)
flows[e] -= cycle_flow
flows[reversed_e] -= cycle_flow
# -------------- NLP methods
def load_conf(self):
""" loads the parameters defined in the config.ini in self.conf to prepare the NLP optimization"""
conf = configparser.ConfigParser()
conf.read(self.conf_path)
params = self.conf
for s in conf.sections():
for p in conf[s]:
params[p.upper()] = eval(conf.get(s,p))
self.conf = params
if self.energy == "Heating":
T = self.conf['SUPPLY_HEAT_TEMPERATURE']
if self.energy == "Cooling":
T = self.conf['SUPPLY_COOL_TEMPERATURE']
# piecewise linear functions
self.conf['CP'] = CP(T)
self.conf['RHO'] = RHO(T)
# REGRESSIONS
if self.energy == "Heating":
self.conf['A_HEAT_TRANSIT_COEF'], self.conf['B_HEAT_TRANSIT_COEF'] = \
HEAT_LOSS_COST((self.conf['SUPPLY_HEAT_TEMPERATURE']+self.conf['RETURN_HEAT_TEMPERATURE'])/2)
if self.energy == "Cooling":
self.conf['A_COOL_TRANSIT_COEF'], self.conf['B_COOL_TRANSIT_COEF'] = \
HEAT_LOSS_COST((self.conf['SUPPLY_COOL_TEMPERATURE']+self.conf['RETURN_COOL_TEMPERATURE'])/2)
self.conf['A_LINEAR_COST'], self.conf['B_LINEAR_COST'] = CONSTRUCTION_COST()
self.conf['A_MAX_VELOCITY'], self.conf['B_MAX_VELOCITY'] = MAX_VELOCITY()
def convert_power_to_mass_flow(self, power_mw):
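        # Rough worked example (illustrative numbers only, not project data): for
        # heating with cp ~ 4200 J/(kg.K) and a 30 K supply/return temperature
        # difference, 1 MW corresponds to about 1e6 / (4200 * 30) ~ 7.9 kg/s.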
if self.energy == "Heating":
mass_flow = (power_mw * 1e6) / (self.conf['CP']\
* (self.conf['SUPPLY_HEAT_TEMPERATURE'] - self.conf['RETURN_HEAT_TEMPERATURE']))
if self.energy == "Cooling":
mass_flow = (power_mw * 1e6) / (self.conf['CP']\
* (self.conf['RETURN_COOL_TEMPERATURE'] - self.conf['SUPPLY_COOL_TEMPERATURE']))
return mass_flow
def convert_mass_flow_to_power(self, mass_flow):
if self.energy == "Heating":
power_mw = mass_flow * 1e-6 * self.conf['CP'] * \
(self.conf['SUPPLY_HEAT_TEMPERATURE'] - self.conf['RETURN_HEAT_TEMPERATURE'])
if self.energy == "Cooling":
power_mw = mass_flow * 1e-6 * self.conf['CP'] * \
(self.conf['RETURN_COOL_TEMPERATURE'] - self.conf['SUPPLY_COOL_TEMPERATURE'])
return power_mw
def get_params(self, network_frame, peak_consumption):
lengths = nx.get_edge_attributes(network_frame, config.EDGE_LENGTH_KEY)
Length, Outflow, Supply_Max_Inflow = {}, {}, {}
sources = set(n for n,d in network_frame.nodes(data = True) if \
config.NODE_TYPE_KEY in d and d[config.NODE_TYPE_KEY] == config.SUPPLY_NODE_TYPE)
connected_buildings = set(n for n,d in network_frame.nodes(data =True) if \
config.NODE_TYPE_KEY in d and d[config.NODE_TYPE_KEY] == config.BUILDING_NODE_TYPE)
# IMPORTANT : edges between a source and a junction must have the form (source, junction)
# edges between a building and junction must have the form (junction, building)
for key in lengths:
u, v = key[0], key[1]
if (v,u) not in Length:
if v in sources: Length[(v,u)] = max(lengths[key],1e-5) # we don't want a length of zero
else: Length[(u,v)] = max(lengths[key],1e-5)
for s in sources:
Supply_Max_Inflow[s] = self.convert_power_to_mass_flow(network_frame.nodes[s][config.SUPPLY_POWER_CAPACITY_KEY])
for b in connected_buildings:
            if self.energy in ("Heating", "Cooling"):
                Outflow[b] = self.convert_power_to_mass_flow(peak_consumption[b])
GraphParam = {}
GraphParam["LENGTH"] = Length
GraphParam["ELEVATION"] = nx.get_node_attributes(network_frame, config.NODE_ELEVATION_KEY)
GraphParam["OUTFLOW"] = Outflow
GraphParam["SUPPLY_MAX_INFLOW"] = Supply_Max_Inflow
return GraphParam
def get_consumption_and_capacities_from_csv(self, graph, csv_file):
consumption_data, capacity_data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from tspdb.src.database_module.db_class import Interface
from tspdb.src.pindex.pindex_utils import index_ts_mapper, index_ts_inv_mapper, index_exists, get_bound_time
from scipy.stats import norm
def unnormalize(arr, mean, std):
return arr *std + mean
def get_prediction_range( index_name, table_name, value_column, interface, t1,t2 , uq = True, uq_method ='Gaussian', c = 95., projected = False):
"""
    Return an array of N (N = t2-t1+1) predicted values along with the confidence interval for the values of column_name from time t1 to t2
    using index_name, by calling either the forecast_range or impute_range function
----------
Parameters
----------
index_name: string
name of the PINDEX used to query the prediction
table_name: string
name of the time series table in the database
value_column: string
name of column than contain time series value
interface: db_class object
object used to communicate with the DB. see ../database/db_class for the abstract class
t1: (int or timestamp)
index or timestamp indicating the start of the queried range
t2: (int or timestamp)
index or timestamp indicating the end of the queried range
uq: boolean optional (default=true)
        if true, return upper and lower bound of the c% confidence interval
    uq_method: string optional (default = 'Gaussian') options: {'Gaussian', 'Chebyshev'}
Uncertainty quantification method used to estimate the confidence interval
c: float optional (default 95.)
confidence level for uncertainty quantification, 0<c<100
----------
Returns
----------
    prediction array, shape [(t2 - t1 + 1)]
        Values of the predicted points of the time series in the time interval t1 to t2
    deviation array, shape [1, (t2 - t1 + 1)]
The deviation from the mean to get the desired confidence level
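    Note (illustrative, not necessarily the exact implementation): for uq_method = 'Gaussian', the deviation
    for a c% interval is typically the normal quantile, e.g. c = 95 gives norm.ppf(1 - (1 - 0.95)/2) ~ 1.96
    standard deviations around the prediction.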
"""
# query pindex parameters
T,T_var, L, k,k_var, L_var, last_model, MUpdateIndex,var_direct, interval, start_ts, last_TS_seen, last_TS_seen_var, index_col, value_columns, MUpdateIndex_var, p = interface.query_table( index_name+'_meta',['T','T_var', 'L', 'k','k_var','L_var', 'no_submodels', 'last_TS_inc', 'var_direct_method', 'agg_interval','start_time', "last_TS_seen", "last_TS_seen_var", "time_column","indexed_column",'last_TS_inc_var','p'])[0]
last_model -= 1
value_columns = value_columns.split(',')
no_ts = len(value_columns)
    try: value_index = value_columns.index(value_column)
    except ValueError: raise Exception('The value column %s selected is not indexed by the chosen pindex'%(value_column))
if not isinstance(t1, (int, np.integer)):
t1 = | pd.to_datetime(t1) | pandas.to_datetime |
import os
import time
import configparser
import joblib
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from torch.utils.data import Dataset, random_split, DataLoader, Subset
from tqdm import tqdm
import argparse
from model import MLPClassifier
events = {
'normal': 0,
'ixnetwork-bgp-hijacking-start': 1,
'ixnetwork-bgp-injection-start': 2,
'node-down': 3,
'interface-down': 4,
'tap-loss-delay': 5,
}
config = configparser.ConfigParser()
config.read('config/mlp_config.ini')
model_path = config.get('MAIN', 'model_path')
model_dir = model_path
window = config.getint('MAIN', 'window')
seed = config.getint('MAIN', 'seed')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
torch.manual_seed(seed)
if not os.path.exists(model_path):
os.makedirs(model_path)
class PreProcessing:
def __init__(self, is_train=True):
self.transfermer = dict()
self.is_train = is_train
if not self.is_train:
self.transfermer = self.load()
def __call__(self, df, s_type='minmax', is_timeseries=False):
if self.is_train:
return self.fit_transform(df, s_type, is_timeseries)
else:
return self.transform(df, is_timeseries)
def fit_transform(self, df, s_type, is_timeseries):
for column in df.columns:
# feature scaling
if s_type == 'minmax':
self.transfermer[column] = MinMaxScaler()
elif s_type == 'standard':
self.transfermer[column] = StandardScaler()
value = self.transfermer[column].fit_transform(
pd.DataFrame(df[column]))
df.loc[:, column] = value
# lag feature
if is_timeseries:
df[column + "_diff"] = df[column].diff()
df[column + "_mean_5"] = df[column].rolling(5).mean()
df = df.fillna(df.median())
return df
def transform(self, df, is_timeseries):
for column in df.columns:
value = self.transfermer[column].transform(
pd.DataFrame(df[column]))
df.loc[:, column] = value
if is_timeseries:
df[column + "_diff"] = df[column].diff()
df[column + "_mean_5"] = df[column].rolling(5).mean()
df = df.fillna(df.median())
return df
def dump(self, filename='/tmp/mlp_transfer.bin'):
with open(filename, 'wb') as f:
joblib.dump(self.transfermer, f)
def load(self, filename='/tmp/mlp_transfer.bin'):
with open(filename, 'rb') as f:
data = joblib.load(f)
return data
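# Illustrative usage sketch for PreProcessing (assumes `df_train` / `df_test` are
# pandas DataFrames of numeric KPI columns; the scaler file defaults to
# /tmp/mlp_transfer.bin as defined above):
#
#   preproc = PreProcessing(is_train=True)
#   df_train = preproc(df_train, s_type='standard', is_timeseries=True)
#   preproc.dump()                                  # persist the fitted scalers
#   preproc_eval = PreProcessing(is_train=False)    # reloads the fitted scalers
#   df_test = preproc_eval(df_test, is_timeseries=True)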
class MyDataset(Dataset):
def __init__(self, data_path, label_path, metrics, transform=None):
self.transform = transform
self.metrics = metrics
data = []
for metric in tqdm(self.metrics):
df = pd.read_csv(os.path.join(data_path, metric + '.tsv'), sep="\t", index_col=0)
df = df.fillna(0)
df = df.sort_values("timestamp")
df = df.set_index("timestamp")
columns_name = {name: metric + '_' + name for name in df.columns}
df.rename(columns=columns_name, inplace=True)
if self.transform:
if metric != "admin-status":
df = self.transform(df, s_type='standard')
data.append(df)
self.dataframe = pd.concat(data, axis=1)
self.dataframe = self.dataframe.reindex(columns=sorted(self.dataframe.columns))
self.data = self.dataframe.values
self.data_num = len(self.dataframe)
self.label = | pd.read_csv(label_path, sep="\t", index_col=0) | pandas.read_csv |
import pandas as pd
from tarpan.shared.compare_parameters import (
save_compare_parameters, CompareParametersType)
def run_model():
data1 = {
"x": [1, 2, 3, 4, 5, 6],
"y": [-1, -2, -3, -4, -5, -6],
"z": [40, 21, 32, 41, 11, 31]
}
df1 = pd.DataFrame(data1)
data2 = {
"x": [2, 3, 1, 1, 3, 4],
"y": [-2.1, -2, -2, -3, -1, -4],
"z": [23, 19, 21, 13, 29, 10]
}
extra = [{"x": 2.2, "y": 1.3}] # Add extra values (optional)
df2 = | pd.DataFrame(data2) | pandas.DataFrame |
import os
import sys
import random
import re
import copy
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import logging
import datetime as dt
from math import radians, cos, sin, asin, sqrt
from datetime import datetime,timedelta
from objects.objects import Cluster,Order,Vehicle,Transition,Grid
from config.setting import *
from preprocessing.readfiles import *
###########################################################################
class Simulation(object):
"""
    This simulator is used to simulate urban vehicle traffic. The day is divided into several time slots.
    At the beginning of each time slot the system state is updated (vehicle arrivals and order completions).
    The system then generates the orders that start within the current time slot and matches each one to the
    nearest idle vehicle. If no idle vehicle is available, or the nearest idle vehicle cannot reach the pickup
    point within the time limit, the match fails and the order is marked as rejected. If the match succeeds,
    the selected vehicle is assigned to serve the order: it is removed from the idle list of its current
    cluster, its expected arrival time is registered in the cluster of the order destination, and it travels
    along the shortest road-network path first to the pickup point and then to the destination. Matching is
    repeated until all orders of the current time slot have been handled.
    At the end of the matching phase, a user-defined dispatch method can relocate idle vehicles from their
    clusters to other clusters that need more vehicles to meet future order demand.
"""
def __init__(self,ClusterMode,DemandPredictionMode,
DispatchMode,VehiclesNumber,TimePeriods,LocalRegionBound,
SideLengthMeter,VehiclesServiceMeter,
NeighborCanServer,FocusOnLocalRegion):
#Component
self.DispatchModule = None
self.DemandPredictorModule = None
#Statistical variables
self.OrderNum = 0
self.RejectNum = 0
self.DispatchNum = 0
self.TotallyDispatchCost = 0
self.TotallyWaitTime = 0
self.TotallyUpdateTime = dt.timedelta()
self.TotallyRewardTime = dt.timedelta()
self.TotallyNextStateTime = dt.timedelta()
self.TotallyLearningTime = dt.timedelta()
self.TotallyDispatchTime = dt.timedelta()
self.TotallyMatchTime = dt.timedelta()
self.TotallyDemandPredictTime = dt.timedelta()
#Data variable
self.Clusters = None
self.Orders = None
self.Vehicles = None
self.Map = None
self.Node = None
self.NodeIDList = None
self.NodeID2Cluseter = {}
self.NodeID2NodesLocation = {}
self.TransitionTempPool = []
self.MapWestBound = LocalRegionBound[0]
self.MapEastBound = LocalRegionBound[1]
self.MapSouthBound = LocalRegionBound[2]
self.MapNorthBound = LocalRegionBound[3]
#Weather data
#------------------------------------------
self.WeatherType = np.array([2,1,1,1,1,0,1,2,1,1,3,3,3,3,3,
3,3,0,0,0,2,1,1,1,1,0,1,0,1,1,
1,3,1,1,0,2,2,1,0,0,2,3,2,2,2,
1,2,2,2,1,0,0,2,2,2,1,2,1,1,1])
self.MinimumTemperature = np.array([12,12,11,12,14,12,9,8,7,8,9,7,9,10,11,
12,13,13,11,11,11,6,5,5,4,4,6,6,5,6])
self.MaximumTemperature = np.array([17,19,19,20,20,19,13,12,13,15,16,18,18,19,19,
18,20,21,19,20,19,12,9,9,10,13,12,12,13,15])
self.WindDirection = np.array([1,2,0,2,7,6,3,2,3,7,1,0,7,1,7,
0,0,7,0,7,7,7,0,7,5,7,6,6,7,7])
self.WindPower = np.array([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,2,1,1,1,1,1,1,1,1])
self.WeatherType = self.Normaliztion_1D(self.WeatherType)
self.MinimumTemperature = self.Normaliztion_1D(self.MinimumTemperature)
self.MaximumTemperature = self.Normaliztion_1D(self.MaximumTemperature)
self.WindDirection = self.Normaliztion_1D(self.WindDirection)
self.WindPower = self.Normaliztion_1D(self.WindPower)
#------------------------------------------
#Input parameters
self.ClusterMode = ClusterMode
self.DispatchMode = DispatchMode
self.VehiclesNumber = VehiclesNumber
self.TimePeriods = TimePeriods
self.LocalRegionBound = LocalRegionBound
self.SideLengthMeter = SideLengthMeter
self.VehiclesServiceMeter = VehiclesServiceMeter
self.ClustersNumber = None
self.NumGrideWidth = None
self.NumGrideHeight = None
self.NeighborServerDeepLimit = None
#Control variable
self.NeighborCanServer = NeighborCanServer
self.FocusOnLocalRegion = FocusOnLocalRegion
#Process variable
self.RealExpTime = None
self.NowOrder = None
self.step = None
self.Episode = 0
self.CalculateTheScaleOfDivision()
#Demand predictor variable
self.DemandPredictionMode = DemandPredictionMode
self.SupplyExpect = None
return
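    # Illustrative driver sketch (hypothetical parameter values; the real entry
    # point and dispatch policy are defined elsewhere in the project):
    #
    #   sim = Simulation(ClusterMode="Grid", DemandPredictionMode="None",
    #                    DispatchMode="Simulation", VehiclesNumber=500,
    #                    TimePeriods=TIMESTEP,  # TIMESTEP assumed to come from config.setting
    #                    LocalRegionBound=(104.035, 104.105, 30.625, 30.695),
    #                    SideLengthMeter=800, VehiclesServiceMeter=800,
    #                    NeighborCanServer=False, FocusOnLocalRegion=False)
    #   sim.CreateAllInstantiate(OrderFileDate="1101")
    #   sim.LoadDispatchComponent(my_dispatch_module)   # optional dispatch policy
    #   sim.Reload(OrderFileDate="1102")                # switch to another day's orders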
def Reload(self,OrderFileDate="1101"):
"""
Read a new order into the simulator and
reset some variables of the simulator
"""
        print("Load order " + OrderFileDate + " and reset the experimental environment")
self.OrderNum = 0
self.RejectNum = 0
self.DispatchNum = 0
self.TotallyDispatchCost = 0
self.TotallyWaitTime = 0
self.TotallyUpdateTime = dt.timedelta()
self.TotallyNextStateTime = dt.timedelta()
self.TotallyLearningTime = dt.timedelta()
self.TotallyDispatchTime = dt.timedelta()
self.TotallyMatchTime = dt.timedelta()
self.TotallyDemandPredictTime = dt.timedelta()
self.Orders = None
self.TransitionTempPool.clear()
self.RealExpTime = None
self.NowOrder = None
self.step = None
#read orders
#-----------------------------------------
if self.FocusOnLocalRegion == False:
Orders = ReadOrder(input_file_path="./data/test/order_2016"+ str(OrderFileDate) + ".csv")
self.Orders = [Order(i[0],i[1],self.NodeIDList.index(i[2]),self.NodeIDList.index(i[3]),i[1]+PICKUPTIMEWINDOW,None,None,None) for i in Orders]
else:
SaveLocalRegionBoundOrdersPath = "./data/test/order_2016" + str(self.LocalRegionBound) + str(OrderFileDate) + ".csv"
if os.path.exists(SaveLocalRegionBoundOrdersPath):
Orders = ReadResetOrder(input_file_path=SaveLocalRegionBoundOrdersPath)
self.Orders = [Order(i[0],string_pdTimestamp(i[1]),self.NodeIDList.index(i[2]),self.NodeIDList.index(i[3]),string_pdTimestamp(i[1])+PICKUPTIMEWINDOW,None,None,None) for i in Orders]
else:
Orders = ReadOrder(input_file_path="./data/test/order_2016"+ str(OrderFileDate) + ".csv")
self.Orders = [Order(i[0],i[1],self.NodeIDList.index(i[2]),self.NodeIDList.index(i[3]),i[1]+PICKUPTIMEWINDOW,None,None,None) for i in Orders]
#Limit order generation area
#-------------------------------
for i in self.Orders[:]:
if self.IsOrderInLimitRegion(i) == False:
self.Orders.remove(i)
#-------------------------------
LegalOrdersSet = []
for i in self.Orders:
LegalOrdersSet.append(i.ID)
OutBoundOrdersSet = []
for i in range(len(Orders)):
if not i in LegalOrdersSet:
OutBoundOrdersSet.append(i)
Orders = pd.DataFrame(Orders)
Orders = Orders.drop(OutBoundOrdersSet)
Orders.to_csv(SaveLocalRegionBoundOrdersPath,index=0)
#-----------------------------------------
#Rename orders'ID
#-------------------------------
for i in range(len(self.Orders)):
self.Orders[i].ID = i
#-------------------------------
#Calculate the value of all orders in advance
#-------------------------------
for EachOrder in self.Orders:
EachOrder.OrderValue = self.RoadCost(EachOrder.PickupPoint,EachOrder.DeliveryPoint)
#-------------------------------
#Reset the Clusters and Vehicles
#-------------------------------
for i in self.Clusters:
i.Reset()
for i in self.Vehicles:
i.Reset()
self.InitVehiclesIntoCluster()
#-------------------------------
return
def Reset(self):
print("Reset the experimental environment")
self.OrderNum = 0
self.RejectNum = 0
self.DispatchNum = 0
self.TotallyDispatchCost = 0
self.TotallyWaitTime = 0
self.TotallyUpdateTime = dt.timedelta()
self.TotallyNextStateTime = dt.timedelta()
self.TotallyLearningTime = dt.timedelta()
self.TotallyDispatchTime = dt.timedelta()
self.TotallyMatchTime = dt.timedelta()
self.TotallyDemandPredictTime = dt.timedelta()
self.TransitionTempPool.clear()
self.RealExpTime = None
self.NowOrder = None
self.step = None
#Reset the Orders and Clusters and Vehicles
#-------------------------------
for i in self.Orders:
i.Reset()
for i in self.Clusters:
i.Reset()
for i in self.Vehicles:
i.Reset()
self.InitVehiclesIntoCluster()
#-------------------------------
return
def InitVehiclesIntoCluster(self):
print("Initialization Vehicles into Clusters or Grids")
for i in self.Vehicles:
while True:
RandomNode = random.choice(range(len(self.Node)))
if RandomNode in self.NodeID2Cluseter:
i.LocationNode = RandomNode
i.Cluster = self.NodeID2Cluseter[i.LocationNode]
i.Cluster.IdleVehicles.append(i)
break
def LoadDispatchComponent(self,DispatchModule):
self.DispatchModule = DispatchModule
def RoadCost(self,start,end):
return int(self.Map[start][end])
def haversine(self, lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
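        Returns the distance in meters; e.g. one degree of longitude along the
        equator, haversine(0, 0, 1, 0), is roughly 111,195 m (2*pi*6371 km / 360).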
"""
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
#haversine
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371
return c * r * 1000
def CalculateTheScaleOfDivision(self):
EastWestSpan = self.LocalRegionBound[1] - self.LocalRegionBound[0]
NorthSouthSpan = self.LocalRegionBound[3] - self.LocalRegionBound[2]
AverageLongitude = (self.MapEastBound-self.MapWestBound)/2
AverageLatitude = (self.MapNorthBound-self.MapSouthBound)/2
self.NumGrideWidth = int(self.haversine(self.MapWestBound,AverageLatitude,self.MapEastBound,AverageLatitude) / self.SideLengthMeter + 1)
self.NumGrideHeight = int(self.haversine(AverageLongitude,self.MapSouthBound,AverageLongitude,self.MapNorthBound) / self.SideLengthMeter + 1)
self.NeighborServerDeepLimit = int((self.VehiclesServiceMeter - (0.5 * self.SideLengthMeter))//self.SideLengthMeter)
self.ClustersNumber = self.NumGrideWidth * self.NumGrideHeight
print("----------------------------")
print("Map extent",self.LocalRegionBound)
print("The width of each grid",self.SideLengthMeter,"meters")
print("Vehicle service range",self.VehiclesServiceMeter,"meters")
print("Number of grids in east-west direction",self.NumGrideWidth)
print("Number of grids in north-south direction",self.NumGrideHeight)
print("Number of grids",self.ClustersNumber)
print("----------------------------")
return
def CreateAllInstantiate(self,OrderFileDate="1101"):
print("Read all files")
self.Node,self.NodeIDList,Orders,Vehicles,self.Map = ReadAllFiles(OrderFileDate)
if self.ClusterMode != "Grid":
print("Create Clusters")
self.Clusters = self.CreateCluster()
elif self.ClusterMode == "Grid":
print("Create Grids")
self.Clusters = self.CreateGrid()
#Construct NodeID to Cluseter map for Fast calculation
NodeID = self.Node['NodeID'].values
for i in range(len(NodeID)):
NodeID[i] = self.NodeIDList.index(NodeID[i])
for i in NodeID:
for j in self.Clusters:
for k in j.Nodes:
if i == k[0]:
self.NodeID2Cluseter[i] = j
print("Create Orders set")
self.Orders = [Order(i[0],i[1],self.NodeIDList.index(i[2]),self.NodeIDList.index(i[3]),i[1]+PICKUPTIMEWINDOW,None,None,None) for i in Orders]
#Limit order generation area
#-------------------------------
if self.FocusOnLocalRegion == True:
print("Remove out-of-bounds Orders")
for i in self.Orders[:]:
if self.IsOrderInLimitRegion(i) == False:
self.Orders.remove(i)
for i in range(len(self.Orders)):
self.Orders[i].ID = i
#-------------------------------
#Calculate the value of all orders in advance
#-------------------------------
print("Pre-calculated order value")
for EachOrder in self.Orders:
EachOrder.OrderValue = self.RoadCost(EachOrder.PickupPoint,EachOrder.DeliveryPoint)
#-------------------------------
#Select number of vehicles
#-------------------------------
Vehicles = Vehicles[:self.VehiclesNumber]
#-------------------------------
print("Create Vehicles set")
self.Vehicles = [Vehicle(i[0],self.NodeIDList.index(i[1]),None,[],None) for i in Vehicles]
self.InitVehiclesIntoCluster()
return
def IsOrderInLimitRegion(self,Order):
if not Order.PickupPoint in self.NodeID2NodesLocation:
return False
if not Order.DeliveryPoint in self.NodeID2NodesLocation:
return False
return True
def IsNodeInLimitRegion(self,TempNodeList):
if TempNodeList[0][0] < self.LocalRegionBound[0] or TempNodeList[0][0] > self.LocalRegionBound[1]:
return False
elif TempNodeList[0][1] < self.LocalRegionBound[2] or TempNodeList[0][1] > self.LocalRegionBound[3]:
return False
return True
def CreateGrid(self):
NumGrideHeight = self.NumGrideHeight
NumGride = self.NumGrideWidth * self.NumGrideHeight
NodeLocation = self.Node[['Longitude','Latitude']].values.round(7)
NodeID = self.Node['NodeID'].values.astype('int64')
#Select small area simulation
#----------------------------------------------------
if self.FocusOnLocalRegion == True:
NodeLocation = NodeLocation.tolist()
NodeID = NodeID.tolist()
TempNodeList = []
for i in range(len(NodeLocation)):
TempNodeList.append((NodeLocation[i],NodeID[i]))
for i in TempNodeList[:]:
if self.IsNodeInLimitRegion(i) == False:
TempNodeList.remove(i)
NodeLocation.clear()
NodeID.clear()
for i in TempNodeList:
NodeLocation.append(i[0])
NodeID.append(i[1])
NodeLocation = np.array(NodeLocation)
#--------------------------------------------------
NodeSet = {}
for i in range(len(NodeID)):
NodeSet[(NodeLocation[i][0],NodeLocation[i][1])] = self.NodeIDList.index(NodeID[i])
#Build each grid
#------------------------------------------------------
if self.FocusOnLocalRegion == True:
TotalWidth = self.LocalRegionBound[1] - self.LocalRegionBound[0]
TotalHeight = self.LocalRegionBound[3] - self.LocalRegionBound[2]
else:
TotalWidth = self.MapEastBound - self.MapWestBound
TotalHeight = self.MapNorthBound - self.MapSouthBound
IntervalWidth = TotalWidth / self.NumGrideWidth
IntervalHeight = TotalHeight / self.NumGrideHeight
AllGrid = [Grid(i,[],[],0,[],{},[]) for i in range(NumGride)]
for key,value in NodeSet.items():
NowGridWidthNum = None
NowGridHeightNum = None
for i in range(self.NumGrideWidth):
if self.FocusOnLocalRegion == True:
LeftBound = (self.LocalRegionBound[0] + i * IntervalWidth)
RightBound = (self.LocalRegionBound[0] + (i+1) * IntervalWidth)
else:
LeftBound = (self.MapWestBound + i * IntervalWidth)
RightBound = (self.MapWestBound + (i+1) * IntervalWidth)
if key[0] > LeftBound and key[0] < RightBound:
NowGridWidthNum = i
break
for i in range(self.NumGrideHeight):
if self.FocusOnLocalRegion == True:
DownBound = (self.LocalRegionBound[2] + i * IntervalHeight)
UpBound = (self.LocalRegionBound[2] + (i+1) * IntervalHeight)
else:
DownBound = (self.MapSouthBound + i * IntervalHeight)
UpBound = (self.MapSouthBound + (i+1) * IntervalHeight)
if key[1] > DownBound and key[1] < UpBound:
NowGridHeightNum = i
break
if NowGridWidthNum == None or NowGridHeightNum == None :
print(key[0],key[1])
raise Exception('error')
else:
AllGrid[self.NumGrideWidth * NowGridHeightNum + NowGridWidthNum].Nodes.append((value,(key[0],key[1])))
#------------------------------------------------------
for i in AllGrid:
for j in i.Nodes:
self.NodeID2NodesLocation[j[0]] = j[1]
#Add neighbors to each grid
#------------------------------------------------------
for i in AllGrid:
#Bound Check
#----------------------------
UpNeighbor = True
DownNeighbor = True
LeftNeighbor = True
RightNeighbor = True
LeftUpNeighbor = True
LeftDownNeighbor = True
RightUpNeighbor = True
RightDownNeighbor = True
if i.ID >= self.NumGrideWidth * (self.NumGrideHeight - 1):
UpNeighbor = False
LeftUpNeighbor = False
RightUpNeighbor = False
if i.ID < self.NumGrideWidth:
DownNeighbor = False
LeftDownNeighbor = False
RightDownNeighbor = False
if i.ID % self.NumGrideWidth == 0:
LeftNeighbor = False
LeftUpNeighbor = False
LeftDownNeighbor = False
if (i.ID+1) % self.NumGrideWidth == 0:
RightNeighbor = False
RightUpNeighbor = False
RightDownNeighbor = False
#----------------------------
#Add all neighbors
#----------------------------
if UpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth])
if DownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth])
if LeftNeighbor:
i.Neighbor.append(AllGrid[i.ID-1])
if RightNeighbor:
i.Neighbor.append(AllGrid[i.ID+1])
if LeftUpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth-1])
if LeftDownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth-1])
if RightUpNeighbor:
i.Neighbor.append(AllGrid[i.ID+self.NumGrideWidth+1])
if RightDownNeighbor:
i.Neighbor.append(AllGrid[i.ID-self.NumGrideWidth+1])
#----------------------------
#You can draw every grid(red) and neighbor(random color) here
#----------------------------------------------
'''
for i in range(len(AllGrid)):
print("Grid ID ",i,AllGrid[i])
print(AllGrid[i].Neighbor)
self.DrawOneCluster(Cluster = AllGrid[i],random = False,show = False)
for j in AllGrid[i].Neighbor:
if j.ID == AllGrid[i].ID :
continue
print(j.ID)
self.DrawOneCluster(Cluster = j,random = True,show = False)
plt.xlim(104.007, 104.13)
plt.ylim(30.6119, 30.7092)
plt.show()
'''
#----------------------------------------------
return AllGrid
def CreateCluster(self):
NodeLocation = self.Node[['Longitude','Latitude']].values.round(7)
NodeID = self.Node['NodeID'].values.astype('int64')
#Set Nodes In Limit Region
#----------------------------------------
if self.FocusOnLocalRegion == True:
print("Remove out-of-bounds Nodes")
NodeLocation = NodeLocation.tolist()
NodeID = NodeID.tolist()
TempNodeList = []
for i in range(len(NodeLocation)):
TempNodeList.append((NodeLocation[i],NodeID[i]))
for i in TempNodeList[:]:
if self.IsNodeInLimitRegion(i) == False:
TempNodeList.remove(i)
NodeLocation.clear()
NodeID.clear()
for i in TempNodeList:
#NodeLocation.append(i[0])
NodeLocation.append(i[0])
NodeID.append(i[1])
NodeLocation = np.array(NodeLocation)
#----------------------------------------
N = {}
for i in range(len(NodeID)):
N[(NodeLocation[i][0],NodeLocation[i][1])] = NodeID[i]
Clusters=[Cluster(i,[],[],0,[],{},[]) for i in range(self.ClustersNumber)]
ClusterPath = './data/'+str(self.LocalRegionBound)+str(self.ClustersNumber)+str(self.ClusterMode)+'Clusters.csv'
if os.path.exists(ClusterPath):
reader = pd.read_csv(ClusterPath,chunksize = 1000)
label_pred = []
for chunk in reader:
label_pred.append(chunk)
label_pred = pd.concat(label_pred)
label_pred = label_pred.values
label_pred = label_pred.flatten()
label_pred = label_pred.astype('int64')
else:
raise Exception('Cluster Path not found')
#Loading Clustering results into simulator
print("Loading Clustering results")
for i in range(self.ClustersNumber):
temp = NodeLocation[label_pred == i]
for j in range(len(temp)):
Clusters[i].Nodes.append((self.NodeIDList.index(N[(temp[j,0],temp[j,1])]),(temp[j,0],temp[j,1])))
SaveClusterNeighborPath = './data/'+str(self.LocalRegionBound)+str(self.ClustersNumber)+str(self.ClusterMode)+'Neighbor.csv'
if not os.path.exists(SaveClusterNeighborPath):
print("Computing Neighbor relationships between clusters")
AllNeighborList = []
for i in Clusters:
NeighborList = []
for j in Clusters:
if i == j:
continue
else:
TempSumCost = 0
for k in i.Nodes:
for l in j.Nodes:
TempSumCost += self.RoadCost(k[0],l[0])
if (len(i.Nodes)*len(j.Nodes)) == 0:
RoadNetworkDistance = 99999
else:
RoadNetworkDistance = TempSumCost / (len(i.Nodes)*len(j.Nodes))
NeighborList.append((j,RoadNetworkDistance))
NeighborList.sort(key=lambda X: X[1])
AllNeighborList.append([])
for j in NeighborList:
AllNeighborList[-1].append((j[0].ID,j[1]))
AllNeighborList = | pd.DataFrame(AllNeighborList) | pandas.DataFrame |
################################################################################
# Module: schedule.py
# Description: Functions for handling conversion of EnergyPlus schedule objects
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import functools
import logging as lg
from datetime import datetime, timedelta
from itertools import groupby
import numpy as np
import pandas as pd
from eppy.bunch_subclass import EpBunch
from numpy import ndarray
from archetypal.energypandas import EnergySeries, plot_energyseries_map
from archetypal.utils import log
class Schedule(object):
"""An object designed to handle any EnergyPlus schedule object"""
def __init__(
self,
Name,
idf=None,
start_day_of_the_week=None,
strict=False,
base_year=2018,
schType=None,
Type=None,
Values=None,
epbunch=None,
**kwargs,
):
"""
Args:
Name (str): The schedule name in the idf model.
idf (IDF): The IDF model.
start_day_of_the_week (int): 0-based day of week (Monday=0). Default is
None which looks for the start day in the IDF model.
strict (bool): if True, schedules that have the Field-Sets such as
Holidays and CustomDay will raise an error if they are absent
from the IDF file. If False, any missing qualifiers will be
ignored.
base_year (int): The base year of the schedule. Defaults to 2018
since the first day of that year is a Monday.
schType (str): The EnergyPlus schedule type. eg.: "Schedule:Year"
Type (str): This field contains a reference to the
Schedule Type Limits object. If found in a list of Schedule Type
Limits (see above), then the restrictions from the referenced
object will be used to validate the current field values.
Values (ndarray): A 24 or 8760 list of schedule values.
epbunch (EpBunch): An EpBunch object from which this schedule can
be created.
**kwargs:
"""
try:
kwargs["idf"] = idf
Name = kwargs.pop("Name", Name)
super(Schedule, self).__init__(Name, **kwargs)
except Exception as e:
pass # todo: make this more robust
self.strict = strict
self._idf = idf
self.Name = Name
self.startDayOfTheWeek = self.get_sdow(start_day_of_the_week)
self.year = base_year
self.count = 0
self.startHOY = 1
self.endHOY = 24
self.unit = "unknown"
self.index_ = None
self._values = Values
self.schType = schType
self.Type = Type
try:
self.epbunch = epbunch or self.idf.get_schedule_epbunch(self.Name)
except KeyError:
self.epbunch = None
if self.Type is None:
self.Type = self.get_schedule_type_limits_name(sch_type=self.schType)
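    # Illustrative usage sketch (assumes an IDF model that actually contains the
    # named schedule; the schedule name below is hypothetical):
    #
    #   sched = Schedule(Name="OfficeOccupancy", idf=idf)
    #   values = sched.all_values          # 8760 hourly values as a numpy array
    #   ts = sched.series                  # same values as a pandas Series
    #   ax = sched.plot(drawstyle="steps-post")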
@property
def idf(self):
if self._idf is None:
from .idfclass.idf import IDF
self._idf = IDF()
return self._idf
@classmethod
def from_values(cls, Name, Values, idf, Type="Fraction", **kwargs):
"""
Args:
Name:
Values:
idf:
Type:
**kwargs:
"""
return cls(Name=Name, Values=Values, Type=Type, idf=idf, **kwargs)
@classmethod
def constant_schedule(
cls, hourly_value=1, Name="AlwaysOn", idf=None, Type="Fraction", **kwargs
):
"""Create a schedule with a constant value for the whole year. Defaults
to a schedule with a value of 1, named 'AlwaysOn'.
Args:
hourly_value (float, optional): The value for the constant schedule.
Defaults to 1.
Name (str, optional): The name of the schedule. Defaults to Always
On.
idf:
**kwargs:
"""
if not idf:
from archetypal import IDF
idf = IDF(prep_outputs=False)
# Add the schedule to the existing idf
epbunch = idf.anidfobject(
key="Schedule:Constant".upper(),
Name=Name,
Schedule_Type_Limits_Name=Type,
Hourly_Value=hourly_value,
)
return cls(
Name=Name,
Values=np.ones(8760) * hourly_value,
idf=idf,
epbunch=epbunch,
**kwargs,
)
@property
def all_values(self) -> np.ndarray:
"""returns the values array"""
if self._values is None:
self._values = self.get_schedule_values(self.epbunch)
return self._values
@property
def max(self):
return max(self.all_values)
@property
def min(self):
return min(self.all_values)
@property
def mean(self):
return np.mean(self.all_values)
@property
def series(self):
"""Returns the schedule values as a pd.Series object with a
DateTimeIndex
"""
index = pd.date_range(
start=self.startDate, periods=len(self.all_values), freq="1H"
)
return pd.Series(self.all_values, index=index)
def get_schedule_type_limits_name(self, sch_type=None):
"""Return the Schedule Type Limits name associated to this schedule
Args:
sch_type:
"""
if self.epbunch is None:
schedule_values = self.idf.get_schedule_epbunch(
self.Name, sch_type=sch_type
)
else:
schedule_values = self.epbunch
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
return "unknown"
else:
return schedule_limit_name
def get_schedule_type_limits_data(self, name=None):
"""Returns Schedule Type Limits data from schedule name
Args:
name:
"""
if name is None:
name = self.Name
schedule_values = self.epbunch
try:
schedule_limit_name = schedule_values.Schedule_Type_Limits_Name
except:
# this schedule is probably a 'Schedule:Week:Daily' which does
# not have a Schedule_Type_Limits_Name field
return "", "", "", ""
else:
(
lower_limit,
upper_limit,
numeric_type,
unit_type,
) = self.idf.get_schedule_type_limits_data_by_name(schedule_limit_name)
self.unit = unit_type
if self.unit == "unknown":
self.unit = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
def get_schedule_type(self, name=None):
"""Return the schedule type, eg.: "Schedule:Year"
Args:
name:
"""
if name is None:
name = self.Name
schedule_values = self.epbunch
sch_type = schedule_values.key
return sch_type
@property
def startDate(self):
"""The start date of the schedule. Satisfies `startDayOfTheWeek`"""
import calendar
c = calendar.Calendar(firstweekday=self.startDayOfTheWeek)
start_date = c.monthdatescalendar(self.year, 1)[0][0]
return datetime(start_date.year, start_date.month, start_date.day)
def plot(self, slice=None, **kwargs):
"""Plot the schedule. Implements the .loc accessor on the series object.
Examples:
>>> from archetypal import IDF
>>> idf = IDF()
            >>> s = Schedule(
            >>>     Name="NECB-A-Thermostat Setpoint-Heating",
            >>>     idf=idf,
            >>> )
>>> s.plot(slice=("2018/01/02", "2018/01/03"), drawstyle="steps-post")
Args:
slice (tuple): define a 2-tuple object the will be passed to
:class:`pandas.IndexSlice` as a range.
**kwargs (dict): keyword arguments passed to
:meth:`pandas.Series.plot`.
"""
hourlyvalues = self.all_values
index = pd.date_range(self.startDate, periods=len(hourlyvalues), freq="1H")
series = pd.Series(hourlyvalues, index=index, dtype=float)
if slice is None:
slice = pd.IndexSlice[:]
elif len(slice) > 1:
slice = pd.IndexSlice[slice[0] : slice[1]]
label = kwargs.pop("label", self.Name)
ax = series.loc[slice].plot(**kwargs, label=label)
return ax
def plot2d(self, **kwargs):
"""Plot the carpet plot of the schedule"""
return plot_energyseries_map(
EnergySeries(self.series, name=self.Name), **kwargs
)
plot2d.__doc__ = plot_energyseries_map.__doc__
def get_interval_day_ep_schedule_values(self, epbunch: EpBunch) -> np.ndarray:
"""Schedule:Day:Interval
Args:
epbunch (EpBunch): The schedule EpBunch object.
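
        Example (hypothetical field values): with Time_1 = "07:00" at value 0.1
        and Time_2 = "24:00" at value 1.0, hours 0-6 receive 0.1 and hours 7-23
        receive 1.0.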
"""
(
lower_limit,
upper_limit,
numeric_type,
unit_type,
) = self.get_schedule_type_limits_data(epbunch.Name)
number_of_day_sch = int((len(epbunch.fieldvalues) - 3) / 2)
hourly_values = np.arange(24, dtype=float)
start_hour = 0
for i in range(number_of_day_sch):
value = float(epbunch["Value_Until_Time_{}".format(i + 1)])
until_time = [
int(s.strip())
for s in epbunch["Time_{}".format(i + 1)].split(":")
if s.strip().isdigit()
]
end_hour = int(until_time[0] + until_time[1] / 60)
for hour in range(start_hour, end_hour):
hourly_values[hour] = value
start_hour = end_hour
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
def get_hourly_day_ep_schedule_values(self, epbunch):
"""Schedule:Day:Hourly
Args:
epbunch (EpBunch): The schedule EpBunch object.
"""
fieldvalues_ = np.array(epbunch.fieldvalues[3:])
return fieldvalues_
def get_compact_weekly_ep_schedule_values(
self, epbunch, start_date=None, index=None
) -> np.ndarray:
"""schedule:week:compact
Args:
epbunch (EpBunch): The schedule EpBunch object.
start_date:
index:
"""
if start_date is None:
start_date = self.startDate
if index is None:
idx = pd.date_range(start=start_date, periods=168, freq="1H")
slicer_ = pd.Series([False] * (len(idx)), index=idx)
else:
slicer_ = pd.Series([False] * (len(index)), index=index)
weekly_schedules = pd.Series([0] * len(slicer_), index=slicer_.index)
# update last day of schedule
if self.count == 0:
self.schType = epbunch.key
self.endHOY = 168
num_of_daily_schedules = int(len(epbunch.fieldvalues[2:]) / 2)
for i in range(num_of_daily_schedules):
day_type = epbunch["DayType_List_{}".format(i + 1)].lower()
# This field can optionally contain the prefix "For:"; remove it as a prefix
# rather than with str.strip(), which would also eat leading letters (e.g. "friday")
if day_type.startswith("for"):
day_type = day_type[3:].lstrip(": ")
how = self.field_set(day_type, slicer_)
if not weekly_schedules.loc[how].empty:
# Loop through days and replace with day:schedule values
days = []
for name, day in weekly_schedules.loc[how].groupby(
pd.Grouper(freq="D")
):
if not day.empty:
ref = epbunch.get_referenced_object(
"ScheduleDay_Name_{}".format(i + 1)
)
day.loc[:] = self.get_schedule_values(sched_epbunch=ref)
days.append(day)
new = pd.concat(days)
slicer_.update(pd.Series([True] * len(new.index), index=new.index))
slicer_ = slicer_.astype(bool)
weekly_schedules.update(new)
else:
return weekly_schedules.values
return weekly_schedules.values
def get_daily_weekly_ep_schedule_values(self, epbunch) -> np.ndarray:
"""schedule:week:daily
Args:
epbunch (EpBunch): The schedule EpBunch object.
"""
# 7 list for 7 days of the week
hourly_values = []
for day in [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]:
ref = epbunch.get_referenced_object("{}_ScheduleDay_Name".format(day))
h = self.get_schedule_values(sched_epbunch=ref)
hourly_values.append(h)
hourly_values = np.array(hourly_values)
# shift days earlier by self.startDayOfTheWeek
hourly_values = np.roll(hourly_values, -self.startDayOfTheWeek, axis=0)
return hourly_values.ravel()
def get_list_day_ep_schedule_values(self, epbunch) -> np.ndarray:
"""schedule:day:list
Args:
epbunch (EpBunch): The schedule epbunch object.
"""
import pandas as pd
freq = int(epbunch["Minutes_per_Item"]) # Frequency of the values
num_values = epbunch.fieldvalues[5:] # List of values
method = epbunch["Interpolate_to_Timestep"] # How to resample
# fill the array with the available values and pad any missing entries
# with zeros (defensive; a valid schedule should provide every value)
all_values = np.zeros(int(24 * 60 / freq))
for i in range(len(all_values)):
try:
all_values[i] = float(num_values[i])
except (IndexError, ValueError):
all_values[i] = 0
# create a fake index to help us with the resampling
index = pd.date_range(
start=self.startDate, periods=int(24 * 60 / freq), freq="{}T".format(freq)
)
series = pd.Series(all_values, index=index)
# resample series to hourly values and apply resampler function
series = series.resample("1H").apply(_how(method))
return series.values
def get_constant_ep_schedule_values(self, epbunch) -> np.ndarray:
"""schedule:constant
Args:
epbunch (EpBunch): The schedule epbunch object.
"""
(
lower_limit,
upper_limit,
numeric_type,
unit_type,
) = self.get_schedule_type_limits_data(epbunch.Name)
value = float(epbunch["Hourly_Value"])
hourly_values = np.full(8760, value, dtype=float)
if numeric_type.strip().lower() == "discrete":
hourly_values = hourly_values.astype(int)
return hourly_values
def get_file_ep_schedule_values(self, epbunch) -> np.ndarray:
"""schedule:file
Args:
epbunch (EpBunch): The schedule epbunch object.
"""
filename = epbunch["File_Name"]
column = epbunch["Column_Number"]
rows = epbunch["Rows_to_Skip_at_Top"]
hours = epbunch["Number_of_Hours_of_Data"]
sep = epbunch["Column_Separator"]
interp = epbunch["Interpolate_to_Timestep"]
file = self.idf.simulation_dir.files(filename)[0]
delimiter = _separator(sep)
skip_rows = int(rows) - 1 # We want to keep the column
col = [int(column) - 1] # zero-based
content = pd.read_csv(
file, delimiter=delimiter, skiprows=skip_rows, usecols=col
)
return content.iloc[:, 0].values
def get_compact_ep_schedule_values(self, epbunch) -> np.ndarray:
"""schedule:compact
Args:
epbunch (EpBunch): The schedule epbunch object.
"""
field_sets = ["through", "for", "interpolate", "until", "value"]
fields = epbunch.fieldvalues[3:]
index = pd.date_range(start=self.startDate, periods=8760, freq="H")
zeros = np.zeros(len(index))
slicer_ = pd.Series([False] * len(index), index=index)
series = pd.Series(zeros, index=index)
#############################################################
#
# Test 2. regression model for a fixed input string size N, Ternary
#
#############################################################
import sys, os
sys.path.append("../..")
sys.path.append("..")
sys.path.append(os.getcwd())
import numpy as np
import pandas as pd
import copy
import pickle
from math import log, e, pi
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
from utils import *
from ternary import *
def import_data():
print("*** importing data ***")
annual_pred = pd.read_pickle("annual_pred_2016.pkl")
target_players = list(annual_pred.columns)
data = pd.read_csv("../../data/nba-enhanced-stats/2012-18_playerBoxScore.csv")
game_metrics = ['playPTS', 'playAST', 'playTO','playFG%','playFT%','play3PM','playTRB','playSTL', 'playBLK']
year_metrics = ['PTS_G','AST_G','TOV_G','TRB_G','STL_G','BLK_G','3P_G','FG%','FT%']
colname_dict = {'playPTS': 'PTS_G', 'playAST': 'AST_G', 'playTO':'TOV_G',
'playFG%': 'FG%','playFT%':'FT%','play3PM':'3P_G',
'playTRB':'TRB_G','playSTL':'STL_G','playBLK':'BLK_G'}
# edit column names to fit with the yearly data
data = data.rename(columns=colname_dict)
date_col = pd.to_datetime(data.gmDate + " " + data.gmTime, format='%Y-%m-%d %H:%M').rename("date")
data = pd.concat([date_col,data], axis=1)
stats_game = data[["date","gmDate","playDispNm"]+year_metrics]
stats_game = stats_game.rename(columns={"playDispNm": "Player"})
df = pd.read_pickle("../../data/nba-hosoi/nba_scores_2103-2018.pkl")
import pandas as pd
from pandas import date_range
ave_daily_balance_keyword_lst = ['结息', '利息', '季息', '入息']
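# The keywords above flag interest-related lines in a bank statement:
# 结息 (interest settlement), 利息 (interest), 季息 (quarterly interest), 入息 (interest income)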
# Build month-day strings for the 21st to the 25th of March, June, September and December
ms = ["%s" % x for x in range(3, 13, 3)]
ds = ["%s" % x for x in range(21, 26)]
ave_days = ["%s%s" % (x, y) for x in ms for y in ds]
def get_month(datetime_str):
month = datetime_str.month
day = datetime_str.day
month_day = "%s%s" % (month, day)
if month_day in ave_days:
return 1
else:
return 0
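# Usage sketch (illustrative): flag statement rows that fall on a settlement day.
#   df['transDate'] = pd.to_datetime(df['transDate'])
#   df['is_settlement_day'] = df['transDate'].apply(get_month)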
def ave_daily_balance_count(df):
"""
:param df: DataFrame of online-banking transaction records
:return: average daily deposit balance
"""
df['transDate'] = pd.to_datetime(df['transDate'])
import glob
import matplotlib
matplotlib.use("Agg")
import bokeh.plotting as plt
from bokeh.embed import file_html
from bokeh.resources import CDN
import cherrypy
import pandas as pd
import numpy as np
class Main(object):
@cherrypy.expose
def index(self):
df = pd.concat([pd.read_csv(fname) for fname in glob.glob("/home/joerg/data/thermostat*.csv")])
df["dt_local"] = | pd.to_datetime(df.dt_local) | pandas.to_datetime |
# -*- coding: UTF-8 -*-
"""
This script demonstrates hard-margin and soft-margin SVMs
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.svm import SVC
def generate_data(n):
"""
Generate the data needed by the model
"""
np.random.seed(2046)
X = np.r_[np.random.randn(n, 2) - [1, 1], np.random.randn(n, 2) + [3, 3]]
Y = [[0]] * n + [[1]] * n
data = np.concatenate((Y, X), axis=1)
data = pd.DataFrame(data, columns=["y", "x1", "x2"])
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
Earnings expectations
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: Consensus forecast net profit (FY1)
:desc: Consensus forecast of net profit for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
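# Minimal usage sketch (illustrative; the security code and values are invented,
# and 'security_code' is assumed to be available in both frames for the merge):
#   tp_earning = pd.DataFrame({'security_code': ['000001'],
#                              'publish_date': ['2019-09-19'],
#                              'net_profit_fy1': [1.2e9]}).set_index('security_code')
#   factor_df = pd.DataFrame({'security_code': ['000001']})
#   factor_df = FactorEarningExpectation.NPFY1(tp_earning, factor_df, '2019-09-19')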
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: Consensus forecast net profit (FY2)
:desc: Consensus forecast of net profit for the second forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: Consensus forecast earnings per share (FY1)
:desc: Mean consensus forecast of earnings per share for the first forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: Consensus forecast earnings per share (FY2)
:desc: Mean consensus forecast of earnings per share for the second forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: Consensus forecast operating revenue (FY1)
:desc: Mean consensus forecast of operating revenue for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: Consensus forecast operating revenue (FY2)
:desc: Mean consensus forecast of operating revenue for the second forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY1)
:desc: Mean consensus forecast of PE for the first forecast fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: Consensus forecast price-to-earnings ratio (PE) (FY2)
:desc: Mean consensus forecast of PE for the second forecast fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY1)
:desc: Mean consensus forecast of PB for the first forecast fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: Consensus forecast price-to-book ratio (PB) (FY2)
:desc: Mean consensus forecast of PB for the second forecast fiscal year
:unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: Price/earnings-to-growth (PEG) ratio (FY1)
:desc: PEG ratio for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: Price/earnings-to-growth (PEG) ratio (FY2)
:desc: PEG ratio for the second forecast fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
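"""Relative change of `colunm` between `pre_trade_date` and `trade_date`, keyed by security_code."""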
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
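"""Absolute change of `colunm` between `pre_trade_date` and `trade_date`, keyed by security_code."""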
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y'])
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast net profit (FY1), one week
:desc: One-week change rate of the consensus net profit forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast net profit (FY1), one month
:desc: One-month change rate of the consensus net profit forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast net profit (FY1), three months
:desc: Three-month change rate of the consensus net profit forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast net profit (FY1), six months
:desc: Six-month change rate of the consensus net profit forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast EPS (FY1), one week
:desc: One-week change of the consensus EPS forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast EPS (FY1), one month
:desc: One-month change of the consensus EPS forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast EPS (FY1), three months
:desc: Three-month change of the consensus EPS forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast EPS (FY1), six months
:desc: Six-month change of the consensus EPS forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast EPS (FY1), one week
:desc: One-week change rate of the consensus EPS forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast EPS (FY1), one month
:desc: One-month change rate of the consensus EPS forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast EPS (FY1), three months
:desc: Three-month change rate of the consensus EPS forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast EPS (FY1), six months
:desc: Six-month change rate of the consensus EPS forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast net profit (FY1), one week
:desc: One-week change of the consensus net profit forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast net profit (FY1), one month
:desc: One-month change of the consensus net profit forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast net profit (FY1), three months
:desc: Three-month change of the consensus net profit forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast net profit (FY1), six months
:desc: Six-month change of the consensus net profit forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def ChgNPFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate between consensus forecast net profit (FY2) and consensus forecast net profit (FY1)
:desc: Change rate of the FY2 consensus net profit forecast relative to the FY1 consensus net profit forecast
:unit:
:view_dimension: 0.01
"""
factor_earning_expect['ChgNPFY1FY2'] = (factor_earning_expect['NPFY2'] - factor_earning_expect['NPFY1']) / abs(
factor_earning_expect['NPFY1']) * 100
return factor_earning_expect
@staticmethod
def ChgEPSFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate between consensus forecast EPS (FY2) and consensus forecast EPS (FY1)
:desc: Change rate of the FY2 consensus EPS forecast relative to the FY1 consensus EPS forecast
:unit:
:view_dimension: 0.01
"""
factor_earning_expect['ChgEPSFY1FY2'] = (factor_earning_expect['EPSFY2'] - factor_earning_expect['EPSFY1']) / abs(
factor_earning_expect['EPSFY1']) * 100
return factor_earning_expect
@staticmethod
def OptIncFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast operating revenue (FY1), one week
:desc: One-week change of the consensus operating revenue forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast operating revenue (FY1), one month
:desc: One-month change of the consensus operating revenue forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast operating revenue (FY1), three months
:desc: Three-month change of the consensus operating revenue forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'operating_revenue_fy1',
'OptIncFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change of consensus forecast operating revenue (FY1), six months
:desc: Six-month change of the consensus operating revenue forecast for the first forecast fiscal year
:unit: CNY
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'operating_revenue_fy1',
'OptIncFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: Change rate of consensus forecast operating revenue (FY1), one week
:desc: One-week change rate of the consensus operating revenue forecast for the first forecast fiscal year
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
# -*- coding: utf-8 -*-
import logging
import os
import xml.etree.ElementTree as ET
from urllib.request import urlretrieve
import pandas as pd
from .constants import TRAIN_DATA_FILE_PATH, TRAIN_DATA_URL, TEST_DATA_FILE_PATH, TEST_DATA_URL, col_names
log = logging.getLogger(__name__)
def download_scai_mirna_corpora(force_download=False):
"""Download the scai-mirna-corpora as XMLs
:param bool force_download: If true, forces the data to get downloaded again; defaults to False
:return: The system file path of the downloaded files
:rtype: str
"""
# Load training set
log.info("Load training data")
if os.path.exists(TRAIN_DATA_FILE_PATH) and not force_download:
log.info('using cached data at %s', TRAIN_DATA_FILE_PATH)
else:
log.info('downloading %s to %s', TRAIN_DATA_URL, TRAIN_DATA_FILE_PATH)
urlretrieve(TRAIN_DATA_URL, TRAIN_DATA_FILE_PATH)
# Load test set
log.info("Load test data")
if os.path.exists(TEST_DATA_FILE_PATH) and not force_download:
log.info('using cached data at %s', TEST_DATA_FILE_PATH)
else:
log.info('downloading %s to %s', TEST_DATA_URL, TEST_DATA_FILE_PATH)
urlretrieve(TEST_DATA_URL, TEST_DATA_FILE_PATH)
return TRAIN_DATA_FILE_PATH, TEST_DATA_FILE_PATH
def get_scai_mirna_dfs(url=None, cache=True, force_download=False):
"""Loads the pairs annotated in the training and test set
1) PubMed ID
2) Pair ID
3) Entity-1 term
4) Entity-1 type
5) Entity-1 offset
6) Entity-2 term
7) Entity-2 type"
8) Entity-2 offset
9) Sentence
10) Interaction
11) Interaction type
:param Optional[str] url: A custom path to use for the data
:param bool cache: If true, the data is downloaded to the file system, else it is loaded from the internet
:param bool force_download: If true, overwrites a previously cached file
:rtype: pandas.DataFrame
"""
if url is None and cache:
train_url, test_url = download_scai_mirna_corpora(force_download=force_download)
# Create data frame for the training set
training_df = create_dataframe_of_pairs(url=train_url)
# Create data frame for the test set
test_df = create_dataframe_of_pairs(url=test_url)
merged_df = pd.concat([training_df, test_df])
import argparse
import sys
import os
from pathlib import Path
import logging
from typing import Dict
import numpy as np
import pandas as pd
import scipy.sparse as sp
from joblib import dump
from knodle.trainer.utils import log_section
from examples.data_preprocessing.tac_based_dataset.utils.utils import count_file_lines, encode_labels
logger = logging.getLogger(__name__)
PRINT_EVERY = 1000000
Z_MATRIX_OUTPUT_TRAIN = "train_rule_matches_z.lib"
Z_MATRIX_OUTPUT_DEV = "dev_rule_matches_z.lib"
Z_MATRIX_OUTPUT_TEST = "test_rule_matches_z.lib"
T_MATRIX_OUTPUT_TRAIN = "mapping_rules_labels.lib"
TRAIN_SAMPLES_OUTPUT = "df_train.lib"
DEV_SAMPLES_OUTPUT = "df_dev.lib"
TEST_SAMPLES_OUTPUT = "df_test.lib"
def preprocess_data(
path_train_data: str,
path_dev_data: str,
path_test_data: str,
path_labels: str,
path_lfs: str,
path_output: str
) -> None:
""" This function reads train and dev data and saved resulted files to output directory"""
Path(path_output).mkdir(parents=True, exist_ok=True)
labels2ids = get_labels(path_labels)
num_labels = len(labels2ids)
other_class_id = max(labels2ids.values()) + 1 # used for dev and test sets
lfs = pd.read_csv(path_lfs)
rule2rule_id = dict(zip(lfs.rule, lfs.rule_id))
num_lfs = max(lfs.rule_id.values) + 1
rule_assignments_t = get_t_matrix(lfs, num_labels)
dump(sp.csr_matrix(rule_assignments_t), os.path.join(path_output, T_MATRIX_OUTPUT_TRAIN))
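# T (mapping_rules_labels) is the (num_rules x num_labels) assignment of each
# labeling function to the label it votes for; the Z matrices built below are
# the per-split (num_samples x num_rules) rule-match indicators.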
get_train_data(
path_train_data,
path_output,
rule2rule_id,
num_lfs,
Z_MATRIX_OUTPUT_TRAIN,
TRAIN_SAMPLES_OUTPUT
)
get_dev_test_data(
path_dev_data,
path_output,
labels2ids,
rule2rule_id,
num_lfs,
Z_MATRIX_OUTPUT_DEV,
DEV_SAMPLES_OUTPUT,
other_class_id
)
get_dev_test_data(
path_test_data,
path_output,
labels2ids,
rule2rule_id,
num_lfs,
Z_MATRIX_OUTPUT_TEST,
TEST_SAMPLES_OUTPUT,
other_class_id
)
def get_labels(path_labels: str) -> Dict:
""" Reads the labels from the file and encode them with ids """
relation2ids = {}
with open(path_labels, encoding="UTF-8") as file:
for line in file.readlines():
relation, relation_enc = line.replace("\n", "").split(",")
relation2ids[relation] = int(relation_enc)
return relation2ids
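# The labels file is assumed to be a headerless CSV with one "<relation>,<integer id>"
# pair per line, e.g. (relation names illustrative):
#   org:founded_by,0
#   per:employee_of,1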
def get_train_data(
path_train_data: str, path_output: str, rule2rule_id: Dict, num_lfs: int, z_matrix: str, samples: str
) -> None:
""" Processes the train data and saves t_matrix, z_matrix and training set info to output directory. """
log_section("Processing of train data has started", logger)
train_data = annotate_conll_data_with_lfs(path_train_data, rule2rule_id, False)
rule_matches_z = get_z_matrix(train_data, num_lfs)
dump(train_data, os.path.join(path_output, samples))
dump(rule_matches_z, os.path.join(path_output, z_matrix))
logger.info("Processing of train data has finished")
def get_dev_test_data(
path_data: str, path_output: str, labels2ids: dict, rule2rule_id: Dict, num_lfs: int, z_matrix: str,
samples: str, other_class_id: int
) -> None:
"""
This function processes the development/test data and saves it as a DataFrame with sample texts and gold labels
(encoded as ids) to the output directory. Additionally, it saves the z matrix for testing purposes.
"""
log_section("Processing of eval data has started", logger)
val_data = get_conll_data_with_ent_pairs(path_data, rule2rule_id, labels2ids, other_class_id)
rule_matches_z = get_z_matrix(val_data, num_lfs)
dump(rule_matches_z, os.path.join(path_output, z_matrix))
dump(val_data, os.path.join(path_output, samples))
logger.info("Processing of eval data has finished")
def annotate_conll_data_with_lfs(conll_data: str, rule2rule_id: Dict, filter_out_other: bool = True) -> pd.DataFrame:
num_lines = count_file_lines(conll_data)
processed_lines = 0
samples, rules, enc_rules = [], [], []
with open(conll_data, encoding='utf-8') as f:
for line in f:
processed_lines += 1
line = line.strip()
if line.startswith("# id="): # Instance starts
sample = ""
subj, obj = {}, {}
elif line == "": # Instance ends
if len(list(subj.keys())) == 0 or len(list(obj.keys())) == 0:
continue
if min(list(subj.keys())) < min(list(obj.keys())):
rule = "_".join(list(subj.values())) + " " + "_".join(list(obj.values()))
else:
rule = "_".join(list(subj.values())) + " " + "_".join(list(obj.values()))
if rule in rule2rule_id.keys():
samples.append(sample.lstrip())
rules.append(rule)
rule_id = rule2rule_id[rule]
enc_rules.append(rule_id)
elif not filter_out_other:
samples.append(sample.lstrip())
rules.append(None)
enc_rules.append(None)
else:
continue
elif line.startswith("#"): # comment
continue
else:
splitted_line = line.split("\t")
token = splitted_line[1]
if splitted_line[2] == "SUBJECT":
subj[splitted_line[0]] = token
sample += " " + token
elif splitted_line[4] == "OBJECT":
obj[splitted_line[0]] = token
sample += " " + token
else:
if (bool(subj) and not bool(obj)) or (not bool(subj) and bool(obj)):
sample += " " + token
if processed_lines % PRINT_EVERY == 0:
logger.info("Processed {:0.2f}% of {} file".format(100 * processed_lines / num_lines,
conll_data.split("/")[-1]))
return pd.DataFrame.from_dict({"sample": samples, "rules": rules, "enc_rules": enc_rules})
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from scipy import integrate, stats
from numpy import absolute, mean
from pandas import DataFrame
from itertools import islice
import researchpy as rp
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
#####################################################################################################
# Before beginning: make sure the file name matches the version of experiment you want to analyze.
# Ensure the output .csv's reflect the desired version as well.
headers = [
'participant_id',
'block',
'type',
'occurence',
'switch_type',
'accuracy',
]
df_accuracy = pd.read_csv(r'C:\Users\danie\Documents\SURREY\Project_1\task_switching_paradigm\pilot3_withoccurence.csv', usecols = headers)
df_accuracy1 = pd.DataFrame()
df_accuracy2 = pd.DataFrame()
# OVERALL % ACCURACY
df_accuracy.set_index(['participant_id', 'type', 'block', 'occurence'], inplace = True)
for group_i, group_v in df_accuracy.groupby(level=[0, 1, 2, 3]):
number_of_trials = 0
overall_accuracy = 0
for index, row in group_v.iterrows():
number_of_trials = number_of_trials + 1
overall_accuracy = overall_accuracy + row['accuracy']
j = ((overall_accuracy/number_of_trials) * 100)
group_v.at[index, 'overall_percent_acc'] = j
group_v.reset_index(drop = False, inplace = True)
df_accuracy1 = pd.concat([df_accuracy1, group_v])
"""PD hate crimes _jobs file."""
import glob
import os
import csv
import string
import logging
import pandas as pd
from datetime import datetime
from trident.util import general
conf = general.config
prod_file = f"{conf['prod_data_dir']}/hate_crimes_datasd.csv"
def get_data():
"""Download Hate Crimes data from FTP."""
wget_str = "wget -np --continue " \
+ "--user=$ftp_user " \
+ "--password='<PASSWORD>' " \
+ "--directory-prefix=$temp_dir " \
+ "ftp://ftp.datasd.org/uploads/sdpd/" \
+ "Hate_Crimes/Hate_Crimes_Data_Portal_SDPD_*.xlsx"
tmpl = string.Template(wget_str)
command = tmpl.substitute(
ftp_user=conf['ftp_datasd_user'],
ftp_pass=conf['ftp_datasd_pass'],
temp_dir=conf['temp_data_dir']
)
return command
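# The substituted command ends up looking roughly like (values illustrative):
#   wget -np --continue --user=<user> --password=<pass> \
#        --directory-prefix=<temp_dir> \
#        ftp://ftp.datasd.org/uploads/sdpd/Hate_Crimes/Hate_Crimes_Data_Portal_SDPD_*.xlsx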
def process_data():
"""Process hate crimes data."""
filename = conf['temp_data_dir'] + "/Hate_Crimes_Data_Portal_SDPD_*.xlsx"
list_of_files = glob.glob(filename)
latest_file = max(list_of_files, key=os.path.getmtime)
logging.info(f"Reading in {latest_file}")
df = pd.read_excel(latest_file,sheet_name='hate_crimes_datasd')
df['date'] = pd.to_datetime(df['date'], errors='coerce')