# Copyright 2020-2020, <NAME>;
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
"""
Test utilities for :term:`json pointer path` modifier.
Copied from pypi/pandalone.
"""
from copy import deepcopy
from random import shuffle
import pandas as pd
import pytest
from graphtik.jsonpointer import (
ResolveError,
escape_jsonpointer_part,
jsonp_path,
pop_path,
resolve_path,
set_path_value,
unescape_jsonpointer_part,
update_paths,
)
pytestmark = pytest.mark.usefixtures("log_levels")
def test_jsonpointer_escape_parts():
def un_esc(part):
return unescape_jsonpointer_part(escape_jsonpointer_part(part))
part = "hi/there"
assert un_esc(part) == part
part = "hi~there"
assert un_esc(part) == part
part = "/hi~there/"
assert un_esc(part) == part
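# A rough sketch of the round-trip above (the exact escaped form is assumed here,
# following RFC 6901: "~" escapes to "~0" and "/" to "~1"):
#   escape_jsonpointer_part("hi/there") == "hi~1there"
#   unescape_jsonpointer_part("hi~1there") == "hi/there"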
def test_jsonp_path_empty():
assert jsonp_path("") == []
def test_jsonp_path_root():
assert jsonp_path("/") == [""]
def test_jsonp_path_regular():
assert jsonp_path("/a") == ["", "a"]
assert jsonp_path("/a/b") == ["", "a", "b"]
def test_jsonp_path_folder():
assert jsonp_path("/a/") == [""]
def test_jsonp_path_None():
with pytest.raises(TypeError):
jsonp_path(None)
def test_jsonp_path_with_spaces():
assert jsonp_path("/ some ") == ["", " some "]
assert jsonp_path("/ some / ") == ["", " some ", " "]
assert jsonp_path(" some ") == [" some "]
assert jsonp_path(" some / ") == [" some ", " "]
def test_jsonp_path_cached():
class C(str):
pass
p = C("a/b")
assert jsonp_path(p) == ["a", "b"]
p._jsonp = False
assert jsonp_path(p) == [p]
assert p._jsonp == False
p._jsonp = None
assert jsonp_path(p) == ["a", "b"]
assert p._jsonp == None
@pytest.mark.parametrize(
"inp, exp",
[
("/a", ["a"]),
("/a/", ["a", ""]),
("/a/b", ["a", "b"]),
("/a/b/", ["a", "b", ""]),
("/a//b", ["a", "", "b"]),
("/a/../b", ["a", "..", "b"]),
("/", [""]),
("/ some ", [" some "]),
("/ some /", [" some ", ""]),
("/ some / ", [" some ", " "]),
("/ some / /", [" some ", " ", ""]),
(None, AttributeError()),
("a", ValueError()),
],
)
def test_jsonp_path_massive(inp, exp):
if isinstance(exp, Exception):
with pytest.raises(type(exp), match=str(exp)):
jsonp_path(inp)
else:
assert jsonp_path(inp) == exp
@pytest.mark.parametrize(
"inp, exp",
[
("/a", ["", "a"]),
("/a/", [""]),
("/a/b", ["", "a", "b"]),
("/a/b/", [""]),
("/a//b", ["", "b"]),
("/", [""]),
("/ some ", ["", " some "]),
("/ some /", [""]),
("/ some / ", ["", " some ", " "]),
(None, TypeError()),
("a", ["a"]),
("a/", [""]),
("a/b", ["a", "b"]),
("a/b/", [""]),
("a/../b/.", ["a", "..", "b", "."]),
("a/../b/.", ["a", "..", "b", "."]),
(" some ", [" some "]),
(" some / ", [" some ", " "]),
],
)
def test_jsonp_path_massive(inp, exp):
if isinstance(exp, Exception):
with pytest.raises(type(exp), match=str(exp)):
jsonp_path(inp)
else:
assert jsonp_path(inp) == exp
@pytest.mark.parametrize(
"inp, exp",
[
("/foo", 1),
("/bar/0", 11),
("/bar/1/a", 222),
("/bar/1/a", 222),
],
)
def test_resolve_simple(inp, exp):
doc = {"foo": 1, "bar": [11, {"a": 222}]}
assert resolve_path(doc, inp) == exp
def test_resolve_path_sequence():
doc = [1, [22, 33]]
path = "/0"
assert resolve_path(doc, path) == 1
path = "/1"
assert resolve_path(doc, path) == [22, 33]
path = "/1/0"
assert resolve_path(doc, path) == 22
path = "/1/1"
assert resolve_path(doc, path) == 33
def test_resolve_path_missing_screams():
doc = {}
path = "/foo"
with pytest.raises(ResolveError):
resolve_path(doc, path)
def test_resolve_path_empty_path():
doc = {}
path = ""
assert resolve_path(doc, path) == doc
doc = {"foo": 1}
assert resolve_path(doc, path) == doc
@pytest.fixture
def std_doc():
"""From https://tools.ietf.org/html/rfc6901#section-5 """
return {
r"foo": ["bar", r"baz"],
r"": 0,
r"a/b": 1,
r"c%d": 2,
r"e^f": 3,
r"g|h": 4,
r"i\\j": 5,
r"k\"l": 6,
r" ": 7,
r"m~n": 8,
}
@pytest.fixture(
params=[
(r"", ...),
(r"/foo", ["bar", "baz"]),
(r"/foo/0", "bar"),
# (r"/", 0), #resolve_path() resolves '/' to root (instead of to '' key)"
(r"/", ...),
(r"/a~1b", 1),
(r"/c%d", 2),
(r"/e^f", 3),
(r"/g|h", 4),
(r"/i\\j", 5),
(r"/k\"l", 6),
(r"/ ", 7),
(r"/m~0n", 8),
]
)
def std_case(std_doc, request):
"""From https://tools.ietf.org/html/rfc6901#section-5 """
path, exp = request.param
if exp is ...:
exp = deepcopy(std_doc)
return path, exp
def test_resolve_path_examples_from_spec(std_doc, std_case):
path, exp = std_case
assert resolve_path(std_doc, path) == exp
def test_resolve_root_path_only():
doc = {}
path = "/"
assert resolve_path(doc, path) == doc
doc = {"foo": 1}
assert resolve_path(doc, path) == doc
doc = {"": 1}
assert resolve_path(doc, path) == doc
@pytest.mark.parametrize(
"inp, exp",
[
("/", ...),
("//", ...),
("///", ...),
("/bar//", ...),
("/bar/1/", ...),
("/foo//", ...),
("/bar/1//foo", 1),
("/bar/1//foo/", ...),
("/foo//bar/1/a", 222),
],
)
def test_resolve_path_re_root(inp, exp):
doc = {"foo": 1, "bar": [11, {"a": 222}]}
assert resolve_path(doc, inp) == (doc if exp is ... else exp)
def test_set_path_empty_doc():
doc = {}
path = "/foo"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc == {"foo": "value"}
doc = {}
path = "/foo/bar"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
@pytest.mark.parametrize("path", ["", "/"])
def test_set_path_root_nothing(path):
"""Changing root simply don't work."""
doc = {"a": 1}
exp = doc.copy()
set_path_value(doc, path, 1)
assert doc == exp
set_path_value(doc, path, {"b": 2})
assert doc == exp
def test_set_path_replace_value():
doc = {"foo": "bar", 1: 2}
path = "/foo"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
doc = {"foo": 1, 1: 2}
path = "/foo"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
doc = {"foo": {"bar": 1}, 1: 2}
path = "/foo"
value = 2
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
def test_set_path_deepen_map_str_value():
doc = {"foo": "bar", 1: 2}
path = "/foo/bar"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
doc = {"foo": "bar", 1: 2}
path = "/foo/bar/some/other"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
def test_set_path_append_path_preserves_intermediate():
doc = {"foo": {"bar": 1}, 1: 2}
path = "/foo/foo2"
value = "value"
set_path_value(doc, path, value)
print(doc)
assert resolve_path(doc, path) == value
assert doc[1] == 2
assert resolve_path(doc, "/foo/bar") == 1
def test_set_path_deepen_map_int_value():
doc = {"foo": 1, 1: 2}
path = "/foo/bar"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
doc = {"foo": 1, 1: 2}
path = "/foo/bar/some/other"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert doc[1] == 2
def test_set_path_deepen_sequence_scalar_item():
doc = [1, 2]
path = "/1"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
doc = [1, 2]
path = "/1/foo/bar"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
@pytest.mark.xfail(reason="Use dash(-) instead to append lists")
def test_set_path_sequence_insert_end():
doc = [0, 1]
path = "/2"
value = "value"
set_path_value(doc, path, value)
assert resolve_path(doc, path) == value
assert resolve_path(doc, "/0") == 0
assert resolve_path(doc, "/1") == 1
def test_set_path_sequence_tail_dash():
doc = [0, 1]
path = "/-"
value = "value"
set_path_value(doc, path, value)
assert doc == [0, 1, "value"]
def test_set_path_sequence_out_of_bounds():
doc = [0, 1]
path = "/3"
value = "value"
with pytest.raises(ValueError):
set_path_value(doc, path, value)
def test_set_path_sequence_with_str_screams():
doc = [0, 1]
path = "/str"
value = "value"
with pytest.raises(ValueError):
set_path_value(doc, path, value)
def _mutate_df(df):
col = df.columns[0]
return (df * 2).rename({col: 2 * col}, axis=1)
def check_dfs_eq(got, exp):
assert (got.fillna(0) == exp.fillna(0)).all(axis=None)
assert got.index.names == exp.index.names
assert got.columns.names == exp.columns.names
@pytest.mark.parametrize("path", ["", "/"])
def test_set_path_df_root_nothing(path):
"""Changing root simply don't work."""
doc = pd.DataFrame({"A": [1, 2]})
exp = doc.copy()
set_path_value(doc, path, 7, concat_axis=1)
check_dfs_eq(doc, exp)
set_path_value(doc, path, _mutate_df(doc), concat_axis=0)
check_dfs_eq(doc, exp)
set_path_value(doc, path, _mutate_df(doc), concat_axis=1)
check_dfs_eq(doc, exp)
@pytest.fixture(params=["a", "/a"])
def root_df_paths(request):
return request.param
def test_set_path_df_root_scream(root_df_paths):
path = root_df_paths
doc = pd.DataFrame({"A": [1, 2]})
with pytest.raises(ValueError, match="^Cannot modify given doc/root"):
set_path_value(doc, path, _mutate_df(doc), concat_axis=0)
with pytest.raises(ValueError, match="^Cannot modify given doc/root"):
set_path_value(doc, path, _mutate_df(doc), concat_axis=1)
def test_set_path_df_concat_ok():
df = pd.DataFrame({"A": [1, 2]})
orig_doc = {"a": df}
val = _mutate_df(df)
doc = orig_doc.copy()
path = "a/Hf"
set_path_value(doc, path, val, concat_axis=1)
got = doc["a"]
exp = pd.concat((df, val), axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 09:13:14 2021
@author: sudhir
"""
# =============================================================================
# Import library
# =============================================================================
import os
import pandas as pd
import numpy as np
import joblib
from .skmetrics import regression_result
from . import dispatcher
MODELS_FOLDER = "models"
FEATURE_PIPELINE = "pipeline_X.pkl"
TARGET_PIPELINE = "pipeline_target.pkl"
TEST_DATA = "input/test_folds.csv"
# =============================================================================
# Predict
# =============================================================================
class MultiRegressionPredict:
"""
Multi Regression Predict
"""
def __init__(self, Id, kfold, drop_cols, feature_pipeline_pkl, **kwargs):
self.Id = Id
self.kfold = kfold
self.drop_cols = drop_cols
self.feature_pipeline_pkl = feature_pipeline_pkl
self.models_folder = "models"
def inverse_transform_y(self, pred, target_col):
# pipe y
file = f"{self.models_folder}/pipeline_{target_col}.pkl"
pipe_y = joblib.load(file)
pred = pipe_y.inverse_transform(pred.reshape(-1, 1))
return pred
def predict_folds_model_avg(
self, model_name, target_col, X, y=None, print_cv=False
):
# predict proba for the regression
y_pred = np.zeros((X.shape[0], 1))
for f in range(self.kfold):
# read model file
model_file = f"{self.models_folder}/{model_name}_{f}.pkl"
clf = joblib.load(model_file)
# predict
pred = clf.predict(X)
y_pred = y_pred + pred.reshape(-1, 1)
# cross valid prediction
if print_cv:
regression_result(y, pred, printf=True)
# average prediction of models
y_pred = y_pred / self.kfold
return y_pred
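# Illustration of the fold averaging above (model name and fold count are assumed,
# not taken from the source): with kfold=3 and model_name="rf_target", the files
# models/rf_target_0.pkl, models/rf_target_1.pkl and models/rf_target_2.pkl are each
# loaded, their predictions summed, and the sum divided by 3.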
def multi_model_prediction(self, df_test, MODELS, last_stage=False):
pred = []
Id = df_test[self.Id]
X = df_test.drop(self.drop_cols, axis=1)
# multi model prediction
for model_name in MODELS:
target_col = model_name.split("_")[-1]
y = df_test[target_col]
# predict
y_pred = self.predict_folds_model_avg(model_name, target_col, X, y)
# print result
pred_tmp = self.inverse_transform_y(y_pred, target_col)
print(f"Avearage Prediction For The Model {model_name}", "_" * 10)
regression_result(y, pred_tmp, printf=True)
if last_stage:
y_pred = pred_tmp
pred.append(pd.DataFrame(y_pred, columns=[model_name]))
pred = pd.concat(pred, axis=1)
return pred
def single_model_prediction(self, df_test, MODELS, last_stage=False):
# single model
Id = df_test[self.Id]
X = df_test.drop(self.drop_cols, axis=1)
# multi model prediction
for model_name in MODELS:
target_col = "target"
# predict
y_pred = self.predict_folds_model_avg(model_name, target_col, X)
# print result
# pred_tmp = self.inverse_transform_y(y_pred, target_col)
# if last_stage:
# y_pred = pred_tmp
pred = pd.DataFrame(y_pred, columns=["target"])
return pred
def final_predict(self):
# read test data
df_test = pd.read_csv(TEST_DATA)
__author__ = '<NAME>'
from flask import Flask, request, send_file
import json
import requests
import pandas as pd
from prediction.regression import linear_regression
from prediction.random_forest import random_forest
from prediction.SVR import svr, linear_svr, nu_svr
import pickle
from io import BytesIO
app = Flask(__name__)
last_trained_model = None
@app.route('/', methods=['GET'])
def index():
return {'about': "Prediction service for 5Genesis Analytics Component. Visit /help for more info."}, 200
@app.route('/help')
@app.route('/API')
@app.route('/api')
def get_help():
response = {
"/predict/datasource/algorithm/target": {
"algorithm": "linreg, rf, svr, linear_svr, nu_svr",
"default parameters": {
'experimentid': "None (at least one experiment ID is mandatory)",
'measurement': "None (individual measurement name, e.g. Throughput_Measures)",
'drop_feature': "None (any feature to be ignored for training)",
'remove_outliers': "None (zscore or mad)",
'normalize': "False (or True, not relevant for some algorithms, e.g. Random Forest or SVR)"
},
"datasource": "uma, athens_iperf, athens_rtt"
},
"/download_model": "Pickled Scikit-Learn model"
}
return response, 200
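# Hypothetical example call (target name and parameter values are illustrative,
# not taken from the source): train a linear regression for a "throughput" target
# on the "uma" datasource over two experiments, with outlier removal and normalization:
#   GET /train/uma/linreg/throughput?experimentid=123&experimentid=124&remove_outliers=zscore&normalize=true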
@app.route("/model")
def download_model():
if not last_trained_model:
return {"warning": "No trained model available."}, 200
pickled_model_string = pickle.dumps(last_trained_model[1])
return send_file(
BytesIO(pickled_model_string),
mimetype='application/octet-stream',
as_attachment=True,
attachment_filename=last_trained_model[0] + '.pickle'
), 200
@app.route("/train/<string:datasource>/<string:algorithm>/<string:target>")
def predict(datasource, algorithm, target):
global last_trained_model
last_trained_model = None
experimentIds = request.args.getlist('experimentid')
if not experimentIds or experimentIds == []:
return {"error": "Must specify at least one experimentId with experimentid=123."}, 400
measurements = request.args.getlist('measurement')
drop_features = request.args.getlist('drop_feature')
remove_outliers = request.args.get('remove_outliers')
normalize = request.args.get('normalize')
normalize = normalize.lower() == 'true' if normalize else False
max_lag = request.args.get('max_lag', '1s')
coefficients = None
results = None
y_values = None
series = pd.DataFrame()
for experimentId in experimentIds:
param_dict = {
'measurement': measurements,
'drop_feature': drop_features,
'remove_outliers': remove_outliers,
'normalize': normalize,
'max_lag': max_lag
}
r = requests.get(f'http://data_handler:5000/get_data/{datasource}/{experimentId}', params=param_dict)
data = r.json()
series = series.append(pd.DataFrame(data))
import pandas as pd
import numpy as np
import datetime as dt
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import os
import logging
import json
# data directory containing the raw NMIR files
NMIR_DATA_DIR = "data/NMIR"
# savepath for training job outputs
OUTPUT_DIR = "output"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# Import the database for AIRACs
AIRAC_DF = pd.read_csv('data/AIRAC_dates.csv')
AIRAC_DF['start_date'] = pd.to_datetime(AIRAC_DF['start_date'])
AIRAC_DF['end_date'] = pd.to_datetime(AIRAC_DF['end_date'])
import os
import gc
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
def load_talkingdata():
print(os.curdir)
dtypes ={
'ip':'uint32',
'app': 'uint16',
'device': 'uint16',
'os': 'uint16',
'channel': 'uint16',
'is_attributed': 'uint8'
}
to_read = ['ip', 'app', 'device', 'os', 'channel', 'is_attributed']
to_parse = ['click_time']
categorical_features = ['app', 'device', 'os', 'channel']
file_path = 'data/xgb_dataset/talkingdata/train.csv'
df = pd.read_csv(file_path, usecols=to_read, dtype=dtypes, nrows=1000000)
clicks_by_ip = df.groupby(['ip']).size().rename('click_by_ip')
print(df.groupby(['ip']))
df = df.join(clicks_by_ip, on='ip')
del clicks_by_ip
gc.collect()
del df['ip']
for c in categorical_features:
ohe = pd.get_dummies(df[c])
from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
import os
import re
import unicodedata
from twitter import OAuth, Twitter
import numpy as np
import pandas as pd
import arrow
from . import templates, plots
from loonathetrends.utils import get_video_title_lookup, get_video_ismv_lookup
auth = OAuth(
os.environ["TWITTER_ACCESSTOKEN"],
os.environ["TWITTER_ACCESSSECRET"],
os.environ["TWITTER_CONSUMERKEY"],
os.environ["TWITTER_CONSUMERSECRET"],
)
t = Twitter(auth=auth)
t_upload = Twitter(domain="upload.twitter.com", auth=auth)
MILESTONES = {
100_000: "100k",
200_000: "200k",
500_000: "500k",
1_000_000: "1M",
2_000_000: "2M",
5_000_000: "5M",
10_000_000: "10M",
20_000_000: "20M",
50_000_000: "50M",
100_000_000: "100M",
}
REGEX_YOUTUBEURL = r"(?:.+?)?(?:\/v\/|watch\/|\?v=|\&v=|youtu\.be\/|\/v=|^youtu\.be\/)([a-zA-Z0-9_-]{11})+"
def _status_length(status):
return len(unicodedata.normalize("NFC", status))
def followers_update(db, freq, dry_run=False, post_plots=False):
if freq == "daily":
ndays = 1
elif freq == "weekly":
ndays = 7
else:
raise RuntimeError("Parameter freq provided not valid")
query = (
"SELECT * FROM followers "
"WHERE tstamp = current_date "
"OR tstamp >= current_date - %s "
"ORDER BY tstamp"
)
template = templates.followers_update
df = pd.read_sql(query, db, params=(ndays,))
date = arrow.get(df["tstamp"].iloc[-1]).format("YYMMDD")
grouped = df.groupby("site")
tots = grouped.last()["count"].to_dict()
difs = (grouped.last()["count"] - grouped.first()["count"]).to_dict()
status = template(freq=freq, date=date, tots=tots, difs=difs)
if _status_length(status) > 280:
raise RuntimeError(f"The status update is {_status_length(status)} characters long.")
if post_plots:
media = plots.new_followers(db)
else:
media = []
if not dry_run:
media_ids = []
for img in media:
media_id = t_upload.media.upload(media=img)["media_id_string"]
media_ids.append(media_id)
if media_ids:
def chunk_four(l):
for i in range(0, len(l), 4):
yield l[i : i + 4]
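# e.g. chunk_four(["a", "b", "c", "d", "e"]) yields ["a", "b", "c", "d"]
# and then ["e"], matching Twitter's limit of four images per tweet.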
last_tweet = None
for chunk in chunk_four(media_ids):
if last_tweet == None:
last_tweet = t.statuses.update(
status=status, media_ids=",".join(chunk)
)
else:
last_tweetid = last_tweet["id_str"]
last_tweet = t.statuses.update(
status="@loonathetrends",
media_ids=",".join(chunk),
in_reply_to_status_id=last_tweetid,
)
else:
t.statuses.update(status=status)
else:
for n, img in enumerate(media, 1):
with open("test{}.png".format(n), "wb") as f:
f.write(img)
return status
def youtube_update(db, kind, dry_run=False):
# create DataFrame for stats
stats = pd.read_sql(
"SELECT * FROM video_stats WHERE "
"tstamp >= (current_date - 8)"
"ORDER BY tstamp",
db,
parse_dates=["tstamp"],
).set_index("tstamp")
lookup = get_video_title_lookup(db)
# find out what video to post about
func = lambda x: x.diff().last("7d").sum()
if kind == "latest":
mvlookup = pd.Series(get_video_ismv_lookup(db))
videoid = (
pd.read_sql(
"SELECT published_at, video_id FROM videos ORDER BY published_at", db
)
.set_index("video_id")
.loc[mvlookup]
.index[-1]
)
elif kind == "views":
videoid = stats.groupby("video_id")["views"].agg(func).idxmax()
elif kind == "likes":
videoid = stats.groupby("video_id")["likes"].agg(func).idxmax()
elif kind == "comments":
videoid = stats.groupby("video_id")["comments"].agg(func).idxmax()
# get and trim stats
stats = stats[stats.video_id == videoid].drop("video_id", axis=1)
last = stats.index[-1]
length = pd.Timedelta("1d")
import re
import numpy as np
import pandas as pd
import pytest
from woodwork import DataTable
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
ZIPCode
)
def test_datatable_physical_types(sample_df):
dt = DataTable(sample_df)
assert isinstance(dt.physical_types, dict)
assert set(dt.physical_types.keys()) == set(sample_df.columns)
for k, v in dt.physical_types.items():
assert isinstance(k, str)
assert v == sample_df[k].dtype
def test_sets_category_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
pd.Series(['a', pd.NaT, 'c'], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_category_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Categorical,
CountryCode,
Ordinal(order=['a', 'b', 'c']),
SubRegionCode,
ZIPCode,
]
for logical_type in logical_types:
ltypes = {
column_name: NaturalLanguage,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_object_dtype_on_update(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: NaturalLanguage
}
dt = DataTable(latlong_df.loc[:, [column_name]], logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: LatLong})
assert dt.columns[column_name].logical_type == LatLong
assert dt.columns[column_name].dtype == LatLong.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == LatLong.pandas_dtype
def test_sets_string_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series(['a', 'b', 'c'], name=column_name),
pd.Series(['a', None, 'c'], name=column_name),
pd.Series(['a', np.nan, 'c'], name=column_name),
pd.Series(['a', pd.NA, 'c'], name=column_name),
]
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype('object')
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_string_dtype_on_update():
column_name = 'test_series'
series = pd.Series(['a', 'b', 'c'], name=column_name)
series = series.astype('object')
logical_types = [
Filepath,
FullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for logical_type in logical_types:
ltypes = {
column_name: Categorical,
}
dt = DataTable(pd.DataFrame(series), logical_types=ltypes)
dt = dt.set_types(logical_types={column_name: logical_type})
assert dt.columns[column_name].logical_type == logical_type
assert dt.columns[column_name].dtype == logical_type.pandas_dtype
assert dt.to_dataframe()[column_name].dtype == logical_type.pandas_dtype
def test_sets_boolean_dtype_on_init():
column_name = 'test_series'
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name)
# BUG: pd.Categorical turns all values into NaN #43334
import numpy as np
import pandas as pd
print(pd.__version__)
data = pd.DataFrame({"Survived": [1, 0, 1], "Sex": [0, 1, 1]})
from flask import Flask, make_response
import pandas as pd
from flask_cors import CORS
from flask import request
import json
def init():
print('starting backend by reading data')
init_df = pd.read_csv('data.csv', keep_default_na=True)
# Remove the credit statements - these are just paying off the card
init_df.dropna(subset=[' Debit'], inplace=True)
# Rename columns to prepare for json
debit_df = pd.DataFrame()
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10)})
# %%
from datetime import datetime, timedelta
from pathlib import Path
import random
import pandas as pd
# %%
data = pd.read_csv("../data/base2020.csv", sep=";")
# %%
def report(state, date, last_date, last_state, age, sex):
if last_state is not None:
events.append(dict(
from_state=last_state,
to_state=state,
age=age,
sex=sex,
days=(date - last_date).days if not pd.isna(last_date) else 0,
))
return date, state
# %%
events = []
for i,r in data.iterrows():
# NOTE: THERE IS NO SEX FIELD IN THE DATA!!!!
sex = random.choice(["M", "F"])
age = r["Edad2020"]
date = pd.NaT
state = None
symptoms_date = pd.to_datetime(r['FIS2020'], format="%m/%d/%Y", errors="coerce")
hospital_date = pd.to_datetime(r['Fingreso2020'], format="%m/%d/%Y", errors="coerce")
confirm_date = pd.to_datetime(r['F.Conf2020'], format="%m/%d/%Y", errors="coerce")
uci_enter_date = pd.to_datetime(r['FechaingresoUCI3112'], format="%m/%d/%Y", errors="coerce")
uci_exit_date = pd.to_datetime(r['FechaegresoUTI'], format="%m/%d/%Y", errors="coerce")
release_date = pd.to_datetime(r['FechaAltaN'], format="%m/%d/%Y", errors="coerce")
if pd.isna(confirm_date) or pd.isna(release_date):
# If these dates are unknown, then there is not enough data
continue
# If the source of infection is abroad, the case enters the simulation
# as a traveler
if r["Fuente2020"] == "Exterior":
arrival_date = pd.to_datetime(r['FechaArribo2020'], format="%m/%d/%Y", errors="coerce")
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# %%
DATA_ROOT = '../../data/raw'
# %% [markdown]
# ## LOADING DATA
# %%
print('Loading raw datasets...', flush=True)
GIT_COMMITS_PATH = f"{DATA_ROOT}/GIT_COMMITS.csv"
GIT_COMMITS_CHANGES = f"{DATA_ROOT}/GIT_COMMITS_CHANGES.csv"
SONAR_MEASURES_PATH = f"{DATA_ROOT}/SONAR_MEASURES.csv"
SZZ_FAULT_INDUCING_COMMITS = f"{DATA_ROOT}/SZZ_FAULT_INDUCING_COMMITS.csv"
JIRA_ISSUES = f"{DATA_ROOT}/JIRA_ISSUES.csv"
# %%
git_commits = pd.read_csv(GIT_COMMITS_PATH)
# Copyright (c) 2020 Civic Knowledge. This file is licensed under the terms of the
# MIT license included in this distribution as LICENSE
import logging
import re
from collections import defaultdict, deque
from pathlib import Path
from time import time
import pandas as pd
from synpums.util import *
''
_logger = logging.getLogger(__name__)
def sample_to_sum(N, df, col, weights):
"""Sample a number of records from a dataset, then return the smallest set of
rows at the front of the dataset where the weight sums to more than N"""
t = df.sample(n=N, weights=weights, replace=True)
# Get the number of records that sum to N.
arg = t[col].cumsum().sub(N).abs().astype(int).argmin()
return t.iloc[:arg + 1]
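# Toy illustration (numbers are made up): for df = pd.DataFrame({"WGTP": [3, 5, 2]}),
# sample_to_sum(6, df, "WGTP", df.WGTP) resamples rows with replacement and returns the
# shortest prefix of that sample whose cumulative WGTP is closest to 6.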
def rms(s):
"""Root mean square"""
return np.sqrt(np.sum(np.square(s)))
def vector_walk_callback(puma_task, tract_task, data, memo):
pass
def make_acs_target_df(acs, columns, geoid):
t = acs.loc[geoid]
target_map = {c + '_m90': c for c in columns if "WGTP" not in c}
target_df = pd.DataFrame({
'est': t[target_map.values()],
'm90': t[target_map.keys()].rename(target_map)
})
target_df['est_min'] = target_df.est - target_df.m90
target_df['est_max'] = target_df.est + target_df.m90
target_df.loc[target_df.est_min < 0, 'est_min'] = 0
return target_df.astype('Int64')
def geoid_path(geoid):
from pathlib import Path
from geoid.acs import AcsGeoid
go = AcsGeoid.parse(geoid)
try:
return Path(f"{go.level}/{go.stusab}/{go.county:03d}/{str(go)}.csv")
except AttributeError:
return Path(f"{go.level}/{go.stusab}/{str(go)}.csv")
class AllocationTask(object):
"""Represents the allocation process to one tract"""
def __init__(self, region_geoid, puma_geoid, acs_ref, hh_ref, cache_dir):
self.region_geoid = region_geoid
self.puma_geoid = puma_geoid
self.acs_ref = acs_ref
self.hh_ref = hh_ref
self.cache_dir = cache_dir
self.sample_pop = None
self.sample_weights = None
self.unallocated_weights = None # Initialized to the puma weights, gets decremented
self.target_marginals = None
self.allocated_weights = None
self.household_count = None
self.population_count = None
self.gq_count = None
self.gq_cols = None
self.sex_age_cols = None
self.hh_size_cols = None
self.hh_race_type_cols = None
self.hh_eth_type_cols = None
self.hh_income_cols = None
self._init = False
self.running_allocated_marginals = None
# A version of the sample_pop constructed by map_cp, added as an instance var so
# the probabilities can be manipulated during the vector walk.
self.cp_df = None
self.cp_prob = None
@property
def row(self):
from geoid.acs import AcsGeoid
tract = AcsGeoid.parse(self.region_geoid)
return [tract.state, tract.stusab, tract.county, self.region_geoid, self.puma_geoid, str(self.acs_ref),
str(self.hh_ref)]
def init(self, use_sample_weights=False, puma_weights=None):
"""Load all of the data, just before running the allocation"""
if isinstance(self.hh_ref, pd.DataFrame):
hh_source = self.hh_ref
else:
hh_source = pd.read_csv(self.hh_ref, index_col='SERIALNO', low_memory=False) \
.drop(columns=['geoid'], errors='ignore').astype('Int64')
if isinstance(self.acs_ref, pd.DataFrame):
acs = self.acs_ref
else:
acs = pd.read_csv(self.acs_ref, index_col='geoid', low_memory=False)
# These are only for debugging.
#self.hh_source = hh_source
#self.tract_acs = acs
return self._do_init(hh_source, acs, puma_weights=puma_weights)
def _do_init(self, hh_source, acs, puma_weights=None):
self.serialno = hh_source.index
# Col 0 is the WGTP column
w_cols = [c for c in hh_source.columns if "WGTP" in c]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
# Not actually a sample pop --- populations are supposed to be unweighted
self.sample_pop = hh_source[['WGTP'] + not_w_cols].iloc[:, 1:].reset_index(drop=True).astype(int)
# Shouldn't this be:
# self.sample_pop = hh_source[not_w_cols].reset_index(drop=True).astype(int)
self.sample_weights = hh_source.iloc[:, 0].reset_index(drop=True).astype(int)
assert self.sample_pop.shape[0] == self.sample_weights.shape[0]
not_w_cols = [c for c in hh_source.columns if "WGTP" not in c]
self.target_marginals = make_acs_target_df(acs, not_w_cols, self.region_geoid)
self.household_count = acs.loc[self.region_geoid].b11016_001
self.population_count = acs.loc[self.region_geoid].b01003_001
self.gq_count = acs.loc[self.region_geoid].b26001_001
self.total_count = self.household_count + self.gq_count
self.allocated_weights = np.zeros(len(self.sample_pop))
self.unallocated_weights = puma_weights if puma_weights is not None else self.sample_weights.copy()
self.running_allocated_marginals = pd.Series(0, index=self.target_marginals.index)
# Sample pop, normalized to unit length to speed up cosine similarity
self.sample_pop_norm = vectors_normalize(self.sample_pop.values)
# Column sets
self.gq_cols = ['b26001_001']
self.sex_age_cols = [c for c in hh_source.columns if c.startswith('b01001')]
self.hh_size_cols = [c for c in hh_source.columns if c.startswith('b11016')]
p = re.compile(r'b11001[^hi]_')
self.hh_race_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b11001[hi]_')
self.hh_eth_type_cols = [c for c in hh_source.columns if p.match(c)]
p = re.compile(r'b19025')
self.hh_income_cols = [c for c in hh_source.columns if p.match(c)]
# We will use this identity in the numpy version of step_schedule
# assert all((self.cp.index / 2).astype(int) == self['index'])
self.rng = np.random.default_rng()
self.make_cp(self.sample_pop)
self._init = True
return acs
def make_cp(self, sp):
"""Make a version of the sample population with two records for each
row, one the negative of the one before it. This is used to generate
rows that can be used in the vector walk."""
self.cp = pd.concat([sp, sp]).sort_index().reset_index()
self.cp.insert(1, 'sign', 1)
self.cp.insert(2, 'select_weight', 0)
self.cp.iloc[0::2, 1:] = self.cp.iloc[0::2, 1:] * -1 # flip sign on the marginal counts
self.update_cp()
return self.cp
def update_cp(self):
self.cp.loc[0::2, 'select_weight'] = self.allocated_weights.tolist()
self.cp.loc[1::2, 'select_weight'] = self.unallocated_weights.tolist()
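# Layout of cp built above: each sample household i occupies two adjacent rows,
#   row 2i   -> sign -1, marginal counts negated, select_weight = allocated_weights[i]
#   row 2i+1 -> sign +1, marginal counts as-is,  select_weight = unallocated_weights[i]
# so a single weighted choice over cp can propose either removing an already-allocated
# household or adding an unallocated one during the vector walk.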
def set_cp_prob(self, cp_prob):
pass
@property
def path(self):
return Path(self.cache_dir).joinpath(geoid_path(str(self.region_geoid))).resolve()
@property
def pums(self):
"""Return the PUMS household and personal records for this PUMA"""
from .pums import build_pums_dfp_dfh
from geoid.acs import Puma
puma = Puma.parse(self.puma_geoid)
dfp, dfh = build_pums_dfp_dfh(puma.stusab, year=2018, release=5)
return dfp, dfh
def get_saved_frame(self):
if self.path.exists():
return pd.read_csv(self.path.resolve(), low_memory=False)
else:
return None
@property
def results_frame(self):
return pd.DataFrame({
'geoid': self.region_geoid,
'serialno': self.serialno,
'weight': self.allocated_weights
})
def save_frame(self):
self.path.parent.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame({
'serialno': self.serialno,
'weight': self.allocated_weights
})
df = df[df.weight > 0]
df.to_csv(self.path, index=False)
def load_frame(self):
df = pd.read_csv(self.path, low_memory=False)
self.init()
aw, _ = df.align(self.sample_weights, axis=0)
self.allocated_weights = df.set_index('serialno').reindex(self.serialno).fillna(0).values[:, 0]
def inc(self, rown, n=1):
if self.allocated_weights[rown] > 0 or n > 0:
self.allocated_weights[rown] += n # Increment the count column
self.running_allocated_marginals += n * self.sample_pop.iloc[rown]
@property
def allocated_pop(self):
return self.sample_pop.mul(self.allocated_weights, axis=0)
@property
def allocated_marginals(self):
t = self.allocated_pop.sum()
t.name = 'allocated_marginals'
return t
def calc_region_sum(self):
return self.allocated_weights.sum()
def column_diff(self, column):
return (self.target_marginals.est[column] - self.allocated_marginals[column])
@property
def target_diff(self):
return self.target_marginals.est - self.allocated_marginals
@property
def rel_target_diff(self):
return ((self.target_marginals.est - self.allocated_marginals) / self.target_marginals.est) \
.replace({np.inf: 0, -np.inf: 0})
@property
def running_target_diff(self):
return self.target_marginals.est - self.running_allocated_marginals
@property
def error_frame(self):
return self.target_marginals \
.join(self.allocated_marginals.to_frame('allocated')) \
.join(self.m90_error.to_frame('m_90')) \
.join(self.target_diff.to_frame('diff')) \
.join(self.rel_target_diff.to_frame('rel'))
@property
def total_error(self):
"""Magnitude of the error vector"""
return np.sqrt(np.sum(np.square(self.target_diff)))
@property
def running_total_error(self):
"""Magnitude of the error vector"""
return np.sqrt(np.sum(np.square(self.running_target_diff)))
@property
def m90_error(self):
"""Error that is relative to the m90 limits. Any value within the m90 limits is an error of 0"""
# Where the allocated marginal is within the m90 range, the error is zero;
# otherwise, return the amount by which the allocated marginal falls outside the m90 range
t = self.allocated_marginals - self.target_marginals.est
t[self.allocated_marginals.between(self.target_marginals.est_min, self.target_marginals.est_max)] = 0
t[t > self.target_marginals.m90] = t - self.target_marginals.m90
t[t < -1 * self.target_marginals.m90] = t + self.target_marginals.m90
return t
@property
def m90_total_error(self):
return np.sqrt(np.sum(np.square(self.m90_error)))
@property
def m90_rms_error(self):
"""RMS error of the m90 differences. Like m90 total error, but divides
by the number of marginal value variables"""
return np.sqrt(np.sum(np.square(self.m90_total_error)) / len(self.target_marginals))
# Equivalent to cosine similarity when the vectors are both normalized
def cosine_similarities(self):
'''Calculate the cosine similarities for all of the sample population records
to the normalized error vector'''
return self.sample_pop_norm.dot(vector_normalize(self.target_diff.values).T)
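# Since the rows of sample_pop_norm are unit vectors, this dot product with the
# normalized error vector equals the cosine similarity between each household's
# marginal profile and the remaining target difference.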
def sample_multicol(self, columns):
targets = self.target_marginals.est
frames = []
for col in columns:
target = targets.loc[col]
if target > 0:
t = self.sample_pop[self.sample_pop[col] > 0]
w = self.sample_weights[self.sample_pop[col] > 0]
if len(t) > 0 and w.sum() > 0:
frames.append(sample_to_sum(target, t, col, w))
if frames:
return pd.concat(frames)
else:
return None
def _pop_to_weights(self, pop):
'''Return weights by counting the records in a population'''
t = pop.copy()
t.insert(0, 'dummy', 1)
t = t.groupby(t.index).dummy.count()
t = t.align(self.sample_weights)[0].fillna(0).values
return t
def initialize_weights_set_sample(self, f=0.85):
"""Sample from the sample population one column at a time, in groups of
columns that describe exclusive measures (a household can contribute to
only one marginal column). Then, resample the population to match the correct number of
households"""
assert self._init
if f == 0:
return
frames = [
self.sample_multicol(self.hh_race_type_cols + self.gq_cols),
self.sample_multicol(self.hh_eth_type_cols),
self.sample_multicol(self.sex_age_cols),
]
frames = [f for f in frames if f is not None]
if len(frames) == 0:
return
# An initial population, which is of the wrong size, so just
# convert it to weights
t = pd.concat(frames)
initial_weights = self._pop_to_weights(t)
# These use those weights to re-sample the population.
target_count = self.household_count + self.gq_count
# Sample some fraction less than the target count, so we can vector walk to the final value
target_count = int(target_count * f)
t = self.sample_pop.sample(target_count, weights=initial_weights, replace=True)
self.allocated_weights = self._pop_to_weights(t)
self.unallocated_weights -= self.allocated_weights
self.running_allocated_marginals = self.allocated_marginals
def _rake(self, f=1):
# Sort the columns by the error
cols = list(self.error_frame.sort_values('diff', ascending=False).index)
# cols = random.sample(list(self.sample_pop.columns), len(self.sample_pop.columns)):
for col in cols:
b = self.sample_pop[col].mul(self.allocated_weights, axis=0).sum()
if b != 0:
a = self.target_marginals.loc[col].replace({pd.NA: 0}).est
r = a / b * f
self.allocated_weights[self.sample_pop[col] > 0] *= r
self.allocated_weights = np.round(self.allocated_weights, 0)
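# Toy illustration of one raking pass (numbers are made up): if a column's target
# estimate is 120 while the currently allocated weighted sum is 100, every household
# contributing to that column has its weight scaled by 120/100 * f before rounding.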
def initialize_weights_raking(self, n_iter=5, initial_weights='sample'):
"""Set the allocated weights to an initial value by 1-D raking, adjusting the
weights to fit the target marginal value for each column. """
if initial_weights == 'sample':
assert self.allocated_weights.shape == self.sample_weights.shape
self.allocated_weights = self.sample_weights
else:
self.allocated_weights = np.ones(self.allocated_weights.shape)
for i in range(n_iter):
# Sort the columns by the error
cols = list(self.error_frame.sort_values('diff', ascending=False).index)
# cols = random.sample(list(self.sample_pop.columns), len(self.sample_pop.columns)):
for col in cols:
b = self.sample_pop[col].mul(self.allocated_weights, axis=0).sum()
if b != 0:
a = self.target_marginals.loc[col].replace({pd.NA: 0}).est
r = a / b
self.allocated_weights[self.sample_pop[col] > 0] *= r
self.allocated_weights = np.round(self.allocated_weights, 0)
try:
self.allocated_weights = self.allocated_weights.values
except AttributeError:
pass
def initialize_weights_sample(self):
"""Initialize the allocated weights proportional to the sample population weights,
adjusted to the total population. """
self.allocated_weights = (self.sample_weights / (self.sample_weights.sum())).multiply(
self.household_count).values.round(0).astype(float)
self.unallocated_weights -= self.allocated_weights
def step_schedule_np(self, i, N, te, td, step_size_max, step_size_min, reversal_rate):
""" Return the next set of samples to add or remove
:param i: Loop index
:param N: Max number of iterations
:param cp: Sample population, transformed by make_cp
:param te: Total error
:param td: Marginals difference vector
:param step_size_max: Maximum step size
:param step_size_min: Minimum step size
:param reversal_rate: Probability to allow an increase in error
:param p: Probability to select each sample row. If None, use column 2 of cp
:return: Records to add or remove from the allocated population
"""
# Compute change in each column of the error vector for adding or subtracting in
# each of the sample population records
# idx 0 is the index of the row in self.sample_pop
# idx 1 is the sign, 1 or -1
# idx 2 is the selection weight
# idx 3 and up are the census count columns
expanded_pop = self.cp.values.astype(int)
p = expanded_pop[:, 2]
# For each new error vector, compute total error ( via vector length). By
# removing the current total error, we get the change in total error for
# adding or removing each row. ( positive values are better )
total_errors = (np.sqrt(np.square(expanded_pop[:, 3:] + td).sum(axis=1))) - te
# For error reducing records, sort them and then multiply
# the weights by a linear ramp, so the larger values of
# reduction get a relative preference over the lower reduction values.
gt0 = np.argwhere(total_errors > 0).flatten() # Error reducing records
srt = np.argsort(total_errors) # Sorted by error
reducing_error = srt[np.in1d(srt, gt0)][::-1] # get the intersection. These are index values into self.cp
# Selection probabilities, multiply by linear ramp to preference higher values.
reducing_p = ((p[reducing_error]) * np.linspace(1, 0, len(reducing_error)))
rps = np.sum(reducing_p)
if rps > 0:
reducing_p = np.nan_to_num(reducing_p / rps)
else:
reducing_p = []
increasing_error = np.argwhere(total_errors < 0).flatten() # Error increasing indexes
increasing_p = p[increasing_error].flatten().clip(min=0)
ips = np.sum(increasing_p)
if ips != 0:
increasing_p = np.nan_to_num(increasing_p / ips) # normalize to 1
else:
increasing_p =[]
# Min number of records to return in this step. The error-increasing records are in
# addition to this number
step_size = int((step_size_max - step_size_min) * ((N - i) / N) + step_size_min)
# Randomly select from each group of increasing or reducing indexes.
cc = []
if len(increasing_error) > 0 and ips > 0:
cc.append(self.rng.choice(increasing_error, int(step_size * reversal_rate), p=increasing_p))
if len(reducing_error) > 0 and rps > 0:
cc.append(self.rng.choice(reducing_error, int(step_size), p=reducing_p))
idx = np.concatenate(cc)
# Columns are : 'index', 'sign', 'delta_err'
delta_err = total_errors[idx].reshape(-1, 1).round(0).astype(int)
return np.hstack([expanded_pop[idx][:, 0:2], delta_err]) # Return the index and sign columns of cp
def _loop_assignment(self, ss):
for j, (idx, sgn, *_) in enumerate(ss):
idx = int(idx)
if (self.allocated_weights[idx] > 0 and sgn < 0) or \
(self.unallocated_weights[idx]>0 and sgn > 0) :
self.running_allocated_marginals += (sgn * self.sample_pop.iloc[idx])
self.allocated_weights[idx] += sgn # Increment the count column
self.unallocated_weights[idx] -= sgn
def _numpy_assignment(self, ss):
# The following code is the numpy equivalent of the loop version of
# assignment to the allocated marginals. It is about 20% faster than the loop
# This selection on ss is the equivalent to this if statement in the loop version:
# if self.allocated_weights[idx] > 0 or sgn > 0:
#
ss = ss[np.logical_or(
np.isin(ss[:, 0], np.nonzero(self.allocated_weights > 0)), # self.allocated_weights[idx] > 0
ss[:, 1] > 0) # sgn > 0
]
# Assign the steps from the step schedule into the allocated weights
if len(ss):
idx = ss[:, 0].astype(int)
sgn = ss[:, 1]
# Update all weights by the array of signs
self.allocated_weights[idx] += sgn
# Don't allow negative weights
self.allocated_weights[self.allocated_weights < 0] = 0
# Add in the signed sampled to the running marginal, to save the cost
# of re-calculating the marginals.
self.running_allocated_marginals += \
np.multiply(self.sample_pop.iloc[idx], sgn.reshape(ss.shape[0], -1)).sum()
def _vector_walk(self, N=2000, min_iter=750, target_error=0.03,
step_size_min=3, step_size_max=15, reversal_rate=.3,
max_ssm=250, cb=None, memo=None):
"""Allocate PUMS records to this object's region.
Args:
N:
min_iter:
target_error:
step_size_min:
step_size_max:
reversal_rate:
max_ssm:
"""
assert self._init
if target_error < 1:
target_error = self.household_count * target_error
min_allocation = None # allocated weights at last minimum
steps_since_min = 0
min_error = self.total_error
self.running_allocated_marginals = self.allocated_marginals
if cb:
# vector_walk_callback(puma_task, tract_task, data, memo):
cb(memo.get('puma_task'), self, None, memo)
for i in range(N):
td = self.running_target_diff.values.astype(int)
te = vector_length(td)
# The unallocated weights can be updated both internally and externally --
# the array can be shared among all tracts in the puma
self.update_cp()
if te < min_error:
min_error = te
min_allocation = self.allocated_weights
steps_since_min = 0
else:
steps_since_min += 1
min_error = min(te, min_error)
if (i > min_iter and te < target_error) or steps_since_min > max_ssm:
break
try:
ss = self.step_schedule_np(i, N, te, td,
step_size_max, step_size_min, reversal_rate)
self._loop_assignment(ss)
yield (i, te, min_error, steps_since_min, len(ss))
except ValueError as e:
# Usually b/c numpy choice() got an empty array
print(e)
raise
if min_allocation is not None:
self.allocated_weights = min_allocation
def vector_walk(self, N=2000, min_iter=750, target_error=0.03, step_size_min=3, step_size_max=10,
reversal_rate=.3, max_ssm=250, callback=None, memo=None,
stats = True):
"""Consider the target state and each household to be a vector. For each iteration
select a household vector with the best cosine similarity to the vector to the
target and add that household to the population. """
assert self._init
rows = []
ts = time()
errors = deque(maxlen=20)
errors.extend([self.total_error] * 20)
g = self._vector_walk(
N=N, min_iter=min_iter, target_error=target_error,
step_size_min=step_size_min, step_size_max=step_size_max,
reversal_rate=reversal_rate, max_ssm=max_ssm,
cb=callback, memo=memo)
if stats is not True:
list(g)
return []
else:
for i, te, min_error, steps_since_min, n_iter in g :
d = {'i': i, 'time': time() - ts, 'step_size': n_iter, 'error': te,
'target_error': target_error,
'total_error': te,
'size': np.sum(self.allocated_weights),
'ssm': steps_since_min,
'min_error': min_error,
'mean_error': np.mean(errors),
'std_error': np.std(errors),
'uw_sum': np.sum(self.unallocated_weights),
'total_count': self.total_count
}
rows.append(d)
errors.append(te)
if callback and i % 10 == 0:
# vector_walk_callback(puma_task, tract_task, data, memo):
callback(None, self, None, memo)
return rows
@classmethod
def get_us_tasks(cls, cache_dir, sl='tract', year=2018, release=5, limit=None, ignore_completed=True):
"""Return all of the tasks for all US states"""
from geoid.censusnames import stusab
tasks = []
for state in stusab.values():
state_tasks = cls.get_state_tasks(cache_dir, state, sl, year, release, limit, ignore_completed)
tasks.extend(state_tasks)
return tasks
@classmethod
def get_tasks(cls, cache_dir, state, sl='tract', year=2018, release=5,
limit=None, use_tqdm=False, ignore_completed=True):
if state.upper() == 'US':
return cls.get_us_tasks(cache_dir, sl, year, release, limit, ignore_completed)
else:
return cls.get_state_tasks(cache_dir, state, sl, year, release, limit, ignore_completed)
@classmethod
def get_state_tasks(cls, cache_dir, state, sl='tract', year=2018, release=5,
limit=None, ignore_completed=True):
"""Fetch ( possibly download) the source data to generate allocation tasks,
and cache the data if a cache_dir is provided"""
from .acs import puma_tract_map
from synpums import build_acs, build_pums_households
from functools import partial
import pickle
_logger.info(f'Loading tasks for {state} from cache {cache_dir}')
cp = Path(cache_dir).joinpath('tasks', 'source', f"{state}-{year}-{release}/")
cp.mkdir(parents=True, exist_ok=True)
asc_p = cp.joinpath("acs.csv")
hh_p = cp.joinpath("households.csv")
tasks_p = cp.joinpath("tasks.pkl")
if limit:
from itertools import islice
def limiter(g, *args, **kwargs):
# Yield at most `limit` rows; extra keyword args (e.g. a progress-bar desc) are ignored.
yield from islice(g, limit)
else:
def limiter(g, *args, **kwargs):
yield from g
if tasks_p and tasks_p.exists():
with tasks_p.open('rb') as f:
_logger.debug(f"Returning cached tasks from {str(tasks_p)}")
return pickle.load(f)
# Cached ACS files
if asc_p and asc_p.exists():
tract_acs = pd.read_csv(asc_p, index_col='geoid', low_memory=False)
else:
tract_acs = build_acs(state, sl, year, release)
if asc_p:
tract_acs.to_csv(asc_p, index=True)
# Cached Households
if hh_p and hh_p.exists():
households = pd.read_csv(hh_p, index_col='SERIALNO', low_memory=False)
else:
households = build_pums_households(state, year=year, release=release)
if hh_p:
households.to_csv(hh_p, index=True)
hh = households.groupby('geoid')
hh_file_map = {}
for key, g in hh:
puma_p = cp.joinpath(f"pumas/{key}.csv")
puma_p.parent.mkdir(parents=True, exist_ok=True)
_logger.debug(f"Write puma file {str(puma_p)}")
g.to_csv(puma_p)
hh_file_map[key] = puma_p
pt_map = puma_tract_map()
tasks = []
for tract_geoid, targets in limiter(tract_acs.iterrows(), desc='Generate Tasks'):
try:
puma_geoid = pt_map[tract_geoid]
t = AllocationTask(tract_geoid, puma_geoid, asc_p, hh_file_map[puma_geoid], cache_dir)
if not t.path.exists() or ignore_completed is False:
tasks.append(t)
except Exception as e:
print("Error", tract_geoid, type(e), e)
if tasks_p:
with tasks_p.open('wb') as f:
_logger.debug(f"Write tasks file {str(tasks_p)}")
pickle.dump(tasks, f, pickle.HIGHEST_PROTOCOL)
return tasks
def run(self, *args, callback=None, memo=None, **kwargs):
self.init()
self.initialize_weights_sample()
rows = self.vector_walk(*args, callback=callback, memo=memo, **kwargs)
self.save_frame()
return rows
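# Minimal end-to-end usage sketch (hypothetical paths and arguments, not part of the module):
#   tasks = AllocationTask.get_state_tasks('/tmp/synpums-cache', 'RI')
#   rows = tasks[0].run(N=2000, target_error=0.03)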
class PumaAllocator(object):
"""Simultaneously allocate all of the tracts in a pums, attempting to reduce the
error between the sum of the allocated weights and the PUMS weights"""
def __init__(self, puma_geoid, tasks, cache_dir, state, year=2018, release=5):
self.cache_dir = cache_dir
self.puma_geoid = puma_geoid
self.tasks = tasks
self.year = year
self.release = release
self.state = state
pums_files = [task.hh_ref for task in self.tasks]
assert all([e == pums_files[0] for e in pums_files])
self.pums_file = pums_files[0]
self._puma_target_marginals = None
self._puma_allocated_marginals = None
self._puma_max_weights = None
self._puma_allocated_weights = None
self._puma_unallocated_weights = None
self.pums = pd.read_csv(pums_files[0], low_memory=False)
self.weights = pd.DataFrame({
'allocated': 0,
'pums': self.pums.WGTP, # Original PUMS weights
'remaining': self.pums.WGTP # Remaining
})
self.prob = None
self.gq_cols = None
self.sex_age_cols = None
self.hh_size_cols = None
self.hh_race_type_cols = None
self.hh_eth_type_cols = None
self.hh_income_cols = None
self.replicate = 0
def init(self, init_method='sample'):
"""Initialize the weights of all of the tasks"""
from tqdm import tqdm
self.hh_ref = hh_source = pd.read_csv(self.tasks[0].hh_ref, index_col='SERIALNO', low_memory=False) \
.drop(columns=['geoid'], errors='ignore').astype('Int64')
self._puma_max_weights = hh_source.iloc[:, 0].reset_index(drop=True).astype(int)
self._puma_unallocated_weights = self._puma_max_weights.copy()
for task in tqdm(self.tasks):
task.init(puma_weights=self._puma_unallocated_weights)
if init_method == 'sample':
self.initialize_weights_sample(task)
if init_method == 'set':
task.initialize_weights_set_sample()
t0 = self.tasks[0] # Just to copy out some internal info.
self.gq_cols = t0.gq_cols
self.sex_age_cols = t0.sex_age_cols
self.hh_size_cols = t0.hh_size_cols
self.hh_race_type_cols = t0.hh_race_type_cols
self.hh_eth_type_cols = t0.hh_eth_type_cols
p = re.compile(r'b19025')
self.hh_income_cols = [c for c in t0.hh_source.columns if p.match(c)]
@classmethod
def get_tasks(cls, cache_dir, state, year=2018, release=5):
tasks = AllocationTask.get_state_tasks(cache_dir, state, sl='tract', year=year, release=release)
puma_tasks = defaultdict(list)
for task in tasks:
puma_tasks[task.puma_geoid].append(task)
return puma_tasks
@classmethod
def get_allocators(cls, cache_dir, state, year=2018, release=5):
tasks = AllocationTask.get_state_tasks(cache_dir, state, sl='tract', year=year, release=release)
puma_tasks = defaultdict(list)
for task in tasks:
puma_tasks[task.puma_geoid].append(task)
return [PumaAllocator(puma_geoid, tasks, cache_dir, state, year, release) for puma_geoid, tasks in
puma_tasks.items()]
def initialize_weights_sample(self, task, frac=.7):
"""Initialize the allocated weights proportional to the sample population weights,
adjusted to the total population. """
wf = self.weights_frame
assert wf.remaining.sum() != 0
wn1 = wf.remaining / wf.remaining.sum() # weights normalized to 1
task.allocated_weights = rand_round(wn1.multiply(task.household_count).values.astype(float))
task.unallocated_weights = np.clip(task.unallocated_weights-task.allocated_weights, a_min=0, a_max=None)
assert not any(task.unallocated_weights<0)
def vector_walk(self, N=1200, min_iter=5000, target_error=0.03, step_size_min=1,
step_size_max=10, reversal_rate=.3, max_ssm=150,
callback=None, memo=None):
"""Run a vector walk on all of the tracts tasks in this puma """
from itertools import cycle
rows = []
ts = time()
memo['puma_task'] = self
def make_vw(task):
return iter(task._vector_walk(
N=N, min_iter=min_iter, target_error=target_error,
step_size_min=step_size_min, step_size_max=step_size_max,
reversal_rate=reversal_rate, max_ssm=max_ssm,
cb=callback, memo=memo))
task_iters = [(task, make_vw(task)) for task in self.tasks]
stopped = set()
running = set([e[0] for e in task_iters])
memo['n_stopped'] = len(stopped)
memo['n_running'] = len(running)
memo['n_calls'] = 0
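# Round-robin scheduling: each pass of the outer loop advances every still-running tract
# walk by one step, so all tracts draw evenly from the shared pool of unallocated weights.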
while True:
for task, task_iter in task_iters:
if task in running:
try:
i, te, min_error, steps_since_min, n_iter = next(task_iter)
memo['n_calls'] += 1
d = {'i': i, 'time': time() - ts, 'step_size': n_iter, 'error': te,
'target_error': target_error,
'size': np.sum(task.allocated_weights),
'ssm': steps_since_min,
'min_error': min_error,
'task': task
}
rows.append(d)
if callback and i % 10 == 0:
callback(self, task, d, memo)
except StopIteration:
stopped.add(task)
running.remove(task)
memo['n_stopped'] = len(stopped)
memo['n_running'] = len(running)
if len(running) == 0:
return rows
if callback:
# vector_walk_callback(puma_task, tract_task, data, memo):
callback(self, None, None, memo)
assert False # Should never get here.
def run(self, *args, callback=None, memo=None, **kwargs):
self.init(init_method='sample')
rows = self.vector_walk(*args, callback=callback, memo=memo, **kwargs)
self.save_frame()
return rows
def get_task(self, geoid):
for task in self.tasks:
if geoid == task.region_geoid:
return task
return None
def tune_puma_allocation(self):
"""Re-run all of the tasks in the puma, trying to reduce the discrepancy
between the """
task_iters = [(task, iter(task._vector_walk())) for task in self.tasks]
for task, task_iter in task_iters:
try:
task.cp_prob = self._update_probabilities()
row = next(task_iter)
print(task.region_geoid, self.rms_error, self.rms_weight_error, np.sum(task.cp_prob))
except StopIteration:
print(task.region_geoid, 'stopped')
@property
def weights_frame(self):
self.weights[
'allocated'] = self.puma_allocated_weights # np.sum(np.array([task.allocated_weights for task in self.tasks]), axis=0)
self.weights['remaining'] = self.weights.pums - self.weights.allocated
self.weights['dff'] = self.weights.allocated - self.weights.pums
self.weights['rdff'] = (self.weights.dff / self.weights.pums).fillna(0)
self.weights['p'] = self.weights.rdff
return self.weights
def _update_probabilities(self):
"""Update the running cp_probs, the probabilities for selecting each PUMS
household from the sample_pop, based on the error in weights for
the households at the Puma level"""
w = self.weights_frame
w['p_pos'] = - w.p.where(w.p < 0, 0)
w['p_neg'] = w.p.where(w.p > 0, 0)
self.prob = np.array(w[['p_neg', 'p_pos']].values.flat)
return self.prob
@property
def puma_target_marginals(self):
from .acs import build_acs
if self._puma_target_marginals is None:
_puma_marginals = build_acs(state=self.state, sl='puma', year=self.year, release=self.release)
cols = self.tasks[
0].target_marginals.index # [c for c in _puma_marginals.columns if c.startswith('b') and not c.endswith('_m90')]
self._puma_target_marginals = _puma_marginals.loc[self.puma_geoid][cols]
return self._puma_target_marginals
@property
def puma_allocated_marginals(self):
return self.allocated_marginals.sum()
@property
def allocated_marginals(self):
series = {task.region_geoid: task.allocated_marginals for task in self.tasks}
return pd.DataFrame(series).T
@property
def allocated_weights(self):
series = {task.region_geoid: task.allocated_weights for task in self.tasks}
return pd.DataFrame(series).T
@property
def puma_allocated_weights(self):
return self.allocated_weights.sum()
@property
def target_marginals(self):
series = {task.region_geoid: task.target_marginals.est for task in self.tasks}
return pd.DataFrame(series).T
@property
def target_errors(self):
series = {task.region_geoid: task.total_error for task in self.tasks}
return pd.Series(series)
@property
def target_diff(self):
series = {task.region_geoid: task.target_diff for task in self.tasks}
return pd.DataFrame(series).T
@property
def rms_error(self):
"""RMS error in all of the individual task marginals"""
t = pd.concat([task.target_diff for task in self.tasks], axis=1).values
return np.sqrt(np.mean(np.nan_to_num(np.square(t))))
@property
def rms_weight_error(self):
return np.sqrt(np.mean(np.square(self.weights_frame.dff)))
@property
def file_name(self):
return f"{self.state}/{self.year}-{self.release}-{self.replicate}/{self.puma_geoid}.csv"
@property
def path(self):
return Path(self.cache_dir).joinpath(self.file_name)
def save_frame(self, path=None):
if path is None:
path = self.path
else:
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
def rf(df):
return df[df.weight > 0]
frames = [rf(task.results_frame) for task in self.tasks]
df = | pd.concat(frames) | pandas.concat |
import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
from mlnext import pipeline
class TestColumnSelector(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_select_columns(self):
t = pipeline.ColumnSelector(keys=['a'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
class TestColumnDropper(TestCase):
def setUp(self):
data = np.arange(8).reshape(-1, 2)
cols = ['a', 'b']
self.df = pd.DataFrame(data, columns=cols)
def test_drop_columns(self):
t = pipeline.ColumnDropper(columns=['b'])
expected = self.df.loc[:, ['a']]
result = t.fit_transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop_columns_verbose(self):
t = pipeline.ColumnDropper(columns=['b'], verbose=True)
expected = self.df.loc[:, ['a']]
result = t.transform(self.df)
pd.testing.assert_frame_equal(result, expected)
def test_drop__missing_columns(self):
t = pipeline.ColumnDropper(columns=['c'])
with self.assertWarns(Warning):
t.transform(self.df)
class TestColumnRename(TestCase):
def test_rename_columns(self):
t = pipeline.ColumnRename(lambda x: x.split('.')[-1])
df = pd.DataFrame(columns=['a.b.c', 'd.e.f'])
expected = pd.DataFrame(columns=['c', 'f'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestNaDropper(TestCase):
def test_drop_na(self):
t = pipeline.NaDropper()
df = pd.DataFrame([1, 0, pd.NA])
expected = pd.DataFrame([1, 0], dtype=object)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestClip(TestCase):
def test_clip(self):
t = pipeline.Clip(lower=0.5, upper=1.5)
df = pd.DataFrame([[0.1, 0.4, 0.6, 0.8, 1.2, 1.5]])
expected = pd.DataFrame([[0.5, 0.5, 0.6, 0.8, 1.2, 1.5]])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
class TestDatetimeTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_datetime(self):
t = pipeline.DatetimeTransformer(columns=['time'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
expected = pd.DataFrame([[datetime.datetime(2021, 1, 4, 14, 12, 31)]],
columns=['time'])
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_missing_cols(self):
t = pipeline.DatetimeTransformer(columns=['t'])
df = pd.DataFrame([['2021-01-04 14:12:31']], columns=['time'])
with self.assertRaises(ValueError):
t.fit_transform(df)
class TestNumericTransformer(TestCase):
# FIXME: fails in gitlab pipeline but succeeds locally
def test_numeric(self):
t = pipeline.NumericTransformer(columns=['1'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
expected = pd.DataFrame([0, 1], columns=['1'], dtype=np.int64)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_missing_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([0, 1], columns=['1'], dtype=object)
with self.assertRaises(ValueError):
t.fit_transform(df)
def test_numeric_additional_column(self):
t = pipeline.NumericTransformer(columns=['2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected['2'] = expected['2'].apply(pd.to_numeric)
result = t.fit_transform(df)
pd.testing.assert_frame_equal(result, expected)
def test_numeric_multiple_column(self):
t = pipeline.NumericTransformer(columns=['1', '2'])
df = pd.DataFrame([[0, 1]], columns=['1', '2'], dtype=object)
expected = pd.DataFrame([[0, 1]], columns=['1', '2'])
result = t.fit_transform(df)
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
import streamlit as st
from streamlit_drawable_canvas import st_canvas
import pandas as pd
from helpers import Homography, VoronoiPitch, Play, PitchImage, PitchDraw, get_table_download_link
from pitch import FootballPitch
from narya.narya.tracker.full_tracker import FootballTracker
import cv2
import numpy as np
from mplsoccer.pitch import Pitch
import matplotlib.pyplot as plt
plt.style.use('dark_background')
import keras.backend.tensorflow_backend as tb
tb._SYMBOLIC_SCOPE.value = True
st.set_option('deprecation.showfileUploaderEncoding', False)
image = None
@st.cache(allow_output_mutation=True)
def create_tracker():
tracker = FootballTracker(pretrained=True,
frame_rate=23,
track_buffer = 60,
ctx=None)
return tracker
@st.cache(allow_output_mutation=True)
def run_tracker(tracker, img_list):
trajectories = tracker(img_list,
split_size = 512,
save_tracking_folder = 'narya_output/',
template = template,
skip_homo = [])
return trajectories
#tracker = create_tracker()
template = cv2.imread('narya/world_cup_template.png')
template = cv2.resize(template, (512,512))/255.
image_selection = st.selectbox('Choose image:', ['', 'Example Image', 'My own image'],
format_func=lambda x: 'Choose image' if x == '' else x)
if image_selection:
if image_selection == 'Example Image':
image = cv2.imread("atm_che_23022021_62_07_2.jpg")
else:
st.title('Upload Image or Video')
uploaded_file = st.file_uploader("Select Image file to open:", type=["png", "jpg", "mp4"])
pitch = FootballPitch()
if uploaded_file:
if uploaded_file.type == 'video/mp4':
play = Play(uploaded_file)
t = st.slider('You have uploaded a video. Choose the frame you want to process:', 0.0,60.0)
image = play.get_frame(t)
image = cv2.imread("atm_che_23022021_62_07_2.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#image = PitchImage(pitch, image=play.get_frame(t))
else:
file_bytes = np.asarray(bytearray(uploaded_file.read()),dtype=np.uint8)
image = cv2.imdecode(file_bytes, 1)
#image = cv2.imread("atm_che_23022021_62_07_2.jpg")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if image is not None:
img_list = [image]
tracker = create_tracker()
trajectories = tracker(img_list,
split_size = 512,
save_tracking_folder = 'narya_output/',
template = template,
skip_homo = [])
x_coords = [val[0][0] for val in trajectories.values()]
y_coords = [val[0][1] for val in trajectories.values()]
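# Rescale the tracker output from its 320x320 template coordinate space to the 120x80 pitch used by mplsoccer.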
x_coords = [x/320*120 for x in x_coords]
y_coords = [y/320*80 for y in y_coords]
pitch = Pitch(view='full', figsize=(6.8, 10.5), orientation='horizontal')
fig, ax = pitch.draw()
pitch.scatter(x_coords, y_coords, ax=ax, c='#c34c45', s=150)
st.title('Tracking Results')
st.write('From left to right: the original image, overlayed bounding boxes + homography and a schematic represenation',
expanded=True)
col1, col2, col3 = st.beta_columns(3)
with col1:
st.image(image, use_column_width= 'always')
with col2:
st.image("narya_output/test_00000.jpg", use_column_width= 'always')
with col3:
st.pyplot(fig)
review = st.selectbox('Do the results look good?:', ['', 'Yes and export', 'No and manually fix'],
format_func=lambda x: 'Do the results look good?' if x == '' else x)
if review:
if review == 'Yes and export':
df = | pd.DataFrame({'x': x_coords, 'y': y_coords}) | pandas.DataFrame |
"""Process the USCRN station table
ftp://ftp.ncdc.noaa.gov/pub/data/uscrn/products/stations.tsv
"""
import pandas as pd
from pyiem.util import get_dbconn
def main():
"""Go"""
pgconn = get_dbconn('mesosite', user='mesonet')
cursor = pgconn.cursor()
df = | pd.read_csv('stations.tsv', sep=r'\t', engine='python') | pandas.read_csv |
############################################
# User-Based Collaborative Filtering (User-User Filtering)
#############################################
# Movie recommendations will be generated based on behavioral similarity between users.
# Step 1: Prepare the data set
# Step 2: Determine the movies watched by the user who will receive recommendations
# Step 3: Access the data and ids of the other users who watched the same movies
# Step 4: Identify the users most similar to the user who will receive recommendations
# Step 5: Calculate the weighted ratings
# Step 6: Calculate the weighted average recommendation score
#############################################
# Step 1: Prepare the data set
#############################################
import pandas as pd
from helpers.helpers import create_user_movie_df
user_movie_df = create_user_movie_df()
# import pickle
# user_movie_df = pickle.load(open('user_movie_df.pkl', 'rb'))
# user_movie_df.head()
random_user = int(pd.Series(user_movie_df.index).sample(1, random_state=45).values)
#############################################
# Step 2: Determine the movies watched by the user who will receive recommendations
#############################################
random_user_df = user_movie_df[user_movie_df.index == random_user]
random_user_df
movies_watched = random_user_df.columns[random_user_df.notna().any()].tolist()
user_movie_df.loc[user_movie_df.index == random_user, user_movie_df.columns == "Schindler's List"]
len(movies_watched)
#############################################
# Step 3: Access the data and ids of the other users who watched the same movies
#############################################
pd.set_option('display.max_columns', 5)
movies_watched_df = user_movie_df[movies_watched]
movies_watched_df.head()
movies_watched_df.shape
user_movie_count = movies_watched_df.T.notnull().sum()
user_movie_count = user_movie_count.reset_index()
user_movie_count.columns = ["userId", "movie_count"]
user_movie_count[user_movie_count["movie_count"] > 20].sort_values("movie_count", ascending=False)
user_movie_count[user_movie_count["movie_count"] == 33].count()
users_same_movies = user_movie_count[user_movie_count["movie_count"] > 20]["userId"]
#############################################
# Step 4: Identify the users most similar to the user who will receive recommendations
#############################################
# We will do this in 3 steps:
# 1. Combine Sinan's data with the data of the other users.
# 2. Build the correlation dataframe.
# 3. Find the most similar users (Top Users).
final_df = pd.concat([movies_watched_df[movies_watched_df.index.isin(users_same_movies.index)],
random_user_df[movies_watched]])
final_df.head()
final_df.T.corr()
corr_df = final_df.T.corr().unstack().sort_values().drop_duplicates()
corr_df = pd.DataFrame(corr_df, columns=["corr"])
corr_df.index.names = ['user_id_1', 'user_id_2']
corr_df = corr_df.reset_index()
corr_df.head()
top_users = corr_df[(corr_df["user_id_1"] == random_user) & (corr_df["corr"] >= 0.65)][
["user_id_2", "corr"]].reset_index(drop=True)
top_users = top_users.sort_values(by='corr', ascending=False)
top_users.rename(columns={"user_id_2": "userId"}, inplace=True)
top_users
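# Toy illustration of the similarity step above (made-up ratings, not project data):
# rows are users, columns are movies, and .T.corr() yields the user-to-user Pearson correlations.
_toy_ratings = pd.DataFrame({'m1': [5, 4, 1], 'm2': [4, 5, 2], 'm3': [1, 2, 5]},
                            index=['user_a', 'user_b', 'user_c'])
_toy_ratings.T.corr()  # user_a and user_b correlate positively; user_c is negatively correlated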
rating = pd.read_csv('datasets/movie_lens_dataset/rating.csv')
top_users_ratings = top_users.merge(rating[["userId", "movieId", "rating"]], how='inner')
top_users_ratings
#############################################
# Step 5: Calculate the weighted ratings
#############################################
top_users_ratings['weighted_rating'] = top_users_ratings['corr'] * top_users_ratings['rating']
top_users_ratings.head()
#############################################
# Step 6: Calculate the weighted average recommendation score
#############################################
temp = top_users_ratings.groupby('movieId').sum()[['corr', 'weighted_rating']]
temp.columns = ['sum_corr', 'sum_weighted_rating']
temp.head()
recommendation_df = pd.DataFrame()
recommendation_df['weighted_average_recommendation_score'] = temp['sum_weighted_rating'] / temp['sum_corr']
recommendation_df['movieId'] = temp.index
recommendation_df = recommendation_df.sort_values(by='weighted_average_recommendation_score', ascending=False)
recommendation_df.head(10)
recommendation_df
movie = pd.read_csv('datasets/movie_lens_dataset/movie.csv')
movie.loc[movie['movieId'].isin(recommendation_df.head(10)['movieId'])]
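# Hand check of the weighted-average score above with made-up numbers: two similar users with
# correlations 0.7 and 0.9 rate a movie 4 and 5, giving (0.7*4 + 0.9*5) / (0.7 + 0.9) = 4.5625.
_demo = pd.DataFrame({'corr': [0.7, 0.9], 'rating': [4, 5]})
(_demo['corr'] * _demo['rating']).sum() / _demo['corr'].sum()  # 4.5625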
#############################################
# Turn the workflow into a function:
#############################################
def user_based_recommender():
import pickle
import pandas as pd
user_movie_df = pickle.load(open('user_movie_df.pkl', 'rb'))
random_user = int(pd.Series(user_movie_df.index).sample(1, random_state=45).values)
random_user_df = user_movie_df[user_movie_df.index == random_user]
movies_watched = random_user_df.columns[random_user_df.notna().any()].tolist()
movies_watched_df = user_movie_df[movies_watched]
user_movie_count = movies_watched_df.T.notnull().sum()
user_movie_count = user_movie_count.reset_index()
user_movie_count.columns = ["userId", "movie_count"]
users_same_movies = user_movie_count[user_movie_count["movie_count"] > 20]["userId"]
final_df = pd.concat([movies_watched_df[movies_watched_df.index.isin(users_same_movies.index)],
random_user_df[movies_watched]])
corr_df = final_df.T.corr().unstack().sort_values().drop_duplicates()
corr_df = pd.DataFrame(corr_df, columns=["corr"])
corr_df.index.names = ['user_id_1', 'user_id_2']
corr_df = corr_df.reset_index()
top_users = corr_df[(corr_df["user_id_1"] == random_user) & (corr_df["corr"] >= 0.65)][
["user_id_2", "corr"]].reset_index(drop=True)
top_users = top_users.sort_values(by='corr', ascending=False)
top_users.rename(columns={"user_id_2": "userId"}, inplace=True)
rating = | pd.read_csv('datasets/movie_lens_dataset/rating.csv') | pandas.read_csv |
#!/usr/bin/env python
"""Tests for `filly` package."""
import os
import sys
import pytest
from contextlib import nullcontext
import pandas as pd
from pandas._testing import assert_frame_equal
# from click.testing import CliRunner
from filly.filly import Filly, FileUploadError
from filly import cli
##TODO: write CLI runner
# def test_command_line_interface():
# """Test the CLI."""
# runner = CliRunner()
# result = runner.invoke(cli.main)
# assert result.exit_code == 0
# assert 'filly.cli.main' in result.output
# help_result = runner.invoke(cli.main, ['--help'])
# assert help_result.exit_code == 0
# assert '--help Show this message and exit.' in help_result.output
## TODO: mock S3
@pytest.fixture
def df():
return pd.read_csv('tests/data/test.csv')
@pytest.mark.parametrize(
"filename, filepath, to_raise, expected_raises",
[
('valid_name', 'valid_path', False, nullcontext()),
(
'valid_name', 'valid_path', True,
pytest.raises(
FileUploadError,
match='Upload failed for File valid_name upload to valid_path'
)
)
]
)
def test_file_upload_error(filename, filepath, to_raise, expected_raises):
if to_raise:
with expected_raises:
raise FileUploadError(filename, filepath)
@pytest.mark.parametrize(
"remote, bucket_name", [
('s3', None),
('s3', '')
]
)
def test_remote(remote, bucket_name):
with pytest.raises(ValueError):
Filly(
remote=remote,
bucket_name=bucket_name
)
@pytest.mark.parametrize(
"filename, filepath, fullpath, mode",
[
('test_csv.csv', 'tests/data/', None, 'w'),
('test_csv_read.csv', 'tests/data/', None, 'r'),
(None, None, 'tests/data/test_csv_read.csv', 'r')
]
)
def test_csv_handler(filename, filepath, fullpath, mode):
dict1 = pd.DataFrame({
"A": [0,0,0],
"B": [1,1,1],
"C": [2,2,2]
})
if mode == 'w':
try:
file_handler = Filly()
file_handler.write_data(
filename=filename,
filepath=filepath,
fullpath=fullpath,
data=dict1
)
dict2 = pd.read_csv(open(os.path.join(filepath, filename), 'r'))
assert_frame_equal(dict1, dict2)
except Exception as err:
print(err)
finally:
os.remove(file_handler.fullpath)
elif mode == 'r':
file_handler = Filly(remote=None)
file_handler.read_data(filename=filename, filepath=filepath, fullpath=fullpath)
assert_frame_equal(file_handler.data, dict1)
@pytest.mark.parametrize(
"filename, filepath, fullpath, mode",
[
('test_pickle.pkl', 'tests/data/', None,'w'),
('test_pickle_read.pkl', 'tests/data/', None, 'r'),
(None, None, 'tests/data/test_pickle.pkl', 'w'),
(None, None, 'tests/data/test_pickle_read.pkl', 'r')
]
)
def test_pickle_handler(filename, filepath, fullpath, mode):
dict1 = pd.DataFrame({
"A": [0,0,0],
"B": [1,1,1],
"C": [2,2,2]
})
if mode == 'w':
try:
file_handler = Filly()
file_handler.write_data(
filename=filename,
filepath=filepath,
fullpath=fullpath,
data=dict1
)
dict2 = pd.read_pickle(file_handler.fullpath)  # the file was written as a pickle, so read it back as one
assert_frame_equal(dict1, dict2)
except Exception as err:
print(err)
finally:
os.remove(file_handler.fullpath)
elif mode == 'r':
file_handler = Filly(remote=None)
file_handler.read_data(filename=filename, filepath=filepath, fullpath=fullpath)
| assert_frame_equal(file_handler.data, dict1) | pandas._testing.assert_frame_equal |
import os
import typing
import numpy as np
import pandas as pd
import audeer
import audformat
import audresample
import audiofile as af
from audinterface.core.typing import Timestamps
def assert_index(obj: pd.Index):
r"""Check if index is conform to audformat."""
if isinstance(obj, pd.MultiIndex) and len(obj.levels) == 2:
if obj.has_duplicates:
max_display = 10
duplicates = obj[obj.duplicated()]
msg_tail = '\n...' if len(duplicates) > max_display else ''
msg_duplicates = '\n'.join(
[
str(duplicate) for duplicate
in duplicates[:max_display].tolist()
]
)
raise ValueError(
'Found duplicates:\n'
f'{msg_duplicates}{msg_tail}'
)
if not (
obj.names[0] == audformat.define.IndexField.START
and obj.names[1] == audformat.define.IndexField.END
):
expected_names = [
audformat.define.IndexField.START,
audformat.define.IndexField.END,
]
raise ValueError(
'Found two levels with names '
f'{obj.names}, '
f'but expected names '
f'{expected_names}.'
)
if not pd.api.types.is_timedelta64_dtype(obj.levels[0].dtype):
raise ValueError(
"Level 'start' must contain values of type 'timedelta64[ns]'."
)
if not | pd.api.types.is_timedelta64_dtype(obj.levels[1].dtype) | pandas.api.types.is_timedelta64_dtype |
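# A minimal example of a segment index that satisfies the checks above (a sketch, not from the library):
import pandas as pd
segments = pd.MultiIndex.from_arrays(
    [pd.to_timedelta([0.0, 1.0], unit='s'), pd.to_timedelta([1.0, 2.0], unit='s')],
    names=['start', 'end'],
)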
"""
Functions to clean up neighborhood data
and feed into interactive charts
"""
import numpy as np
import pandas as pd
from datetime import date, timedelta
S3_FILE_PATH = "s3://public-health-dashboard/jhu_covid19/"
NEIGHBORHOOD_URL = f"{S3_FILE_PATH}la-county-neighborhood-time-series.parquet"
CROSSWALK_URL = f"{S3_FILE_PATH}la_neighborhoods_population_crosswalk.parquet"
NEIGHBORHOOD_APPENDED_URL = f"{S3_FILE_PATH}la-county-neighborhood-testing-appended.parquet"
def clean_data():
df = pd.read_parquet(NEIGHBORHOOD_URL)
crosswalk = pd.read_parquet(CROSSWALK_URL)
# Get rid of duplicates
# We keep the incorporated and unincorporated labels because
# If there are duplicate dates, but diff values for cases and deaths, let's keep the max
df = (df[df.Region != "Long Beach"]
.assign(
# Had to convert date to string to write to parquet, but we want it as datetime/object
date = pd.to_datetime(df.date).dt.date,
cases = df.groupby(["Region", "date", "date2"])["cases"].transform("max"),
deaths = df.groupby(["Region", "date", "date2"])["deaths"].transform("max"),
).drop_duplicates(subset = ["Region", "date", "date2", "cases", "deaths"])
.drop(columns = ["LCITY", "COMMUNITY", "LABEL"])
)
# Our crosswalk is more extensive, get rid of stuff so we can have a m:1 merge
crosswalk = (crosswalk[crosswalk.Region.notna()]
[["Region", "aggregate_region", "population"]]
.drop_duplicates()
)
# Merge in pop
df = pd.merge(df, crosswalk, on = "Region", how = "inner", validate = "m:1")
# Be explicit about which missing values to fill in
# Add new lines if more missing data appears later
df = interpolate_linearly(df, "11/18/20", "11/20/20")
df = interpolate_linearly(df, "12/19/20", "12/22/20")
df = interpolate_linearly(df, "1/21/21", "1/30/21")
df = interpolate_linearly(df, "3/29/21", "4/3/21")
# Aggregate
keep_cols = ["aggregate_region", "population", "date", "date2"]
aggregated = (df.groupby(keep_cols)
.agg({"cases": "sum", "deaths": "sum"})
.reset_index()
)
sort_cols = ["aggregate_region", "date", "date2"]
group_cols = ["aggregate_region"]
final = derive_columns(aggregated, sort_cols, group_cols)
return final
def interpolate_linearly(df, start_date, end_date):
"""
Interpolate and fill in missing data
df: pandas.DataFrame
start_date: inclusive, provide the date where there is case/death numbers,
right before the set of missing values.
end_date: inclusive, provide the date where there is case/death numbers,
right after the set of missing values.
Ex: if 12/20 and 12/21 are missing, start_date is 12/19 and end_date is 12/22.
"""
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
days_in_between = (end_date - start_date).days - 1
# Now interpolate, but do it just around where the missing area is
starting_df = df.loc[df.date2==start_date]
ending_df = df.loc[df.date2 == end_date]
sort_cols = ["Region", "date2"]
group_cols = ["Region"]
df2 = starting_df.copy()
for i in range(1, days_in_between + 1):
df2 = (df2.append(starting_df)
.reset_index(drop=True)
)
df2["obs"] = df2.groupby(group_cols).cumcount() + 1
df2 = df2.assign(
date2 = df2.apply(lambda x: x.date2 + timedelta(days = i) if x.obs==i
else x.date2, axis=1),
cases = df2.apply(lambda x: np.nan if x.obs==i else x.cases, axis=1),
deaths = df2.apply(lambda x: np.nan if x.obs==i else x.deaths, axis=1),
)
df2["date"] = df2.date2.dt.date
if i == days_in_between:
df2 = (df2.append(ending_df)
.sort_values(sort_cols)
.drop(columns = "obs")
.reset_index(drop=True)
)
# Find our start / end points to calculate change
for col in ["cases", "deaths"]:
df2 = (df2.sort_values(sort_cols)
.assign(
start = df2.groupby(group_cols)[col].transform("min"),
change = (df2.groupby(group_cols)[col].transform("max") -
df2.groupby(group_cols)[col].transform("min")),
)
)
df2 = (df2.assign(
daily_change = (df2.change / (days_in_between + 1))
).rename(columns = {"daily_change": f"change_{col}",
"start": f"start_{col}"})
)
df2 = df2.assign(
days_since = df2.sort_values(sort_cols).groupby(group_cols).cumcount(),
)
for col in ["cases", "deaths"]:
start_col = f"start_{col}"
change_col = f"change_{col}"
df2[col] = (df2[col].fillna(
df2[start_col] + (df2[change_col] * df2.days_since))
.astype(int)
)
# Append it back to original df
full_df = (df[(df.date2 != start_date) & (df.date2 != end_date)]
.append(df2.drop(
columns = ["days_since", "change",
"start_cases", "start_deaths",
"change_cases", "change_deaths"]), sort=False)
.sort_values(sort_cols)
.reset_index(drop=True)
)
return full_df
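# A small intuition check for the routine above (toy numbers, not project data): the per-Region
# fill is ordinary linear interpolation, which pandas can reproduce on a simple date-indexed series.
def _interpolation_intuition():
    s = pd.Series([100, None, None, 160],
                  index=pd.date_range("2020-11-18", periods=4))
    return s.interpolate()  # 100.0, 120.0, 140.0, 160.0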
def derive_columns(df, sort_cols, group_cols):
# Derive columns
POP_DENOM = 100_000
df = (df.assign(
new_cases = (df.sort_values(sort_cols).groupby(group_cols)["cases"]
.diff(periods=1)
),
cases_per100k = df.cases / df.population * POP_DENOM,
).sort_values(sort_cols)
.reset_index(drop=True)
)
df = df.assign(
new_cases = df.new_cases.fillna(0)
)
# Calculate rolling averages
df = (df.assign(
cases_avg7 = df.cases.rolling(window=7).mean(),
new_cases_avg7 = df.new_cases.rolling(window=7).mean(),
cases_per100k_avg7 = df.cases_per100k.rolling(window=7).mean(),
)
)
# Calculate quartiles
case_quartiles = (df.groupby("date")["cases_avg7"].describe()[["25%", "50%", "75%"]]
.rename(columns = {"25%": "cases_p25",
"50%": "cases_p50",
"75%" :"cases_p75"})
.reset_index()
)
normalized_case_quartiles = (df.groupby("date")["cases_per100k_avg7"].describe()[["25%", "50%", "75%"]]
.rename(columns = {"25%": "ncases_p25",
"50%": "ncases_p50",
"75%" :"ncases_p75"})
.reset_index()
)
df2 = pd.merge(df, case_quartiles, on = "date", how = "left", validate = "m:1")
df3 = pd.merge(df2, normalized_case_quartiles, on = "date", how = "left", validate = "m:1")
# Add rankings
df3["rank"] = df3.groupby("date")["cases_per100k"].rank("dense", ascending=False).astype("Int64")
df3["max_rank"] = df3.groupby("date")["rank"].transform("max").astype(int)
return df3
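# Toy illustration of the dense ranking used in derive_columns (made-up numbers):
# ties share a rank and no ranks are skipped, e.g. 80 -> 1, 80 -> 1, 50 -> 2.
def _rank_intuition():
    toy = pd.DataFrame({"date": ["d1", "d1", "d1"], "cases_per100k": [50.0, 80.0, 80.0]})
    return toy.groupby("date")["cases_per100k"].rank("dense", ascending=False)  # 2.0, 1.0, 1.0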
def clean_testing_data():
df = | pd.read_parquet(NEIGHBORHOOD_APPENDED_URL) | pandas.read_parquet |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# This purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical tmeperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calender
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
# Load temperature data only
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
sim_temperature=sim_weather[cities]
# Convert temperatures to Fahrenheit
sim_temperature= (sim_temperature*(9/5))+32
sim_temperature=sim_temperature.values
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_cities = len(cities)
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
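# Vectorized equivalent of the degree-day loops above (a sketch; it relies on the same column
# layout the loops assume, i.e. temperatures sit in columns 1..num_cities of his_temp_matrix):
temps_hist = his_temp_matrix[:, 1:num_cities + 1].astype(float)
HDD_vec = np.clip(65.0 - temps_hist, 0.0, None)
CDD_vec = np.clip(temps_hist - 65.0, 0.0, None)
n_years_hist = len(HDD_vec) // 365
annual_HDD_vec = HDD_vec[:n_years_hist * 365].reshape(n_years_hist, 365, num_cities).sum(axis=1)
annual_CDD_vec = CDD_vec[:n_years_hist * 365].reshape(n_years_hist, 365, num_cities).sum(axis=1)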
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow= | pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Unit tests for the elephant.pandas_bridge module.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
The the level in `list` converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = | pd.DatetimeIndex(data=[level]) | pandas.DatetimeIndex |
# flake8: noqa
import nose
import pandas
from pandas.compat import u
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
import pandas.util.testing as tm
# deprecated
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
from pandas.io.wb import search, download, get_countries
class TestWB(tm.TestCase):
@slow
@network
def test_wdi_search(self):
# Test that a name column exists, and that some results were returned
# ...without being too strict about what the actual contents of the
# results actually are. The fact that there are some, is good enough.
result = search('gdp.*capita.*constant')
self.assertTrue(result.name.str.contains('GDP').any())
@slow
@network
def test_wdi_download(self):
# Test a bad indicator with double (US), triple (USA),
# standard (CA, MX), non standard (KSV),
# duplicated (US, US, USA), and unknown (BLA) country codes
# ...but NOT a crash inducing country code (World bank strips pandas
# users of the luxury of laziness, because they create their
# own exceptions, and don't clean up legacy country codes.
# ...but NOT a retired indicator (User should want it to error.)
cntry_codes = ['CA', 'MX', 'USA', 'US', 'US', 'KSV', 'BLA']
inds = ['NY.GDP.PCAP.CD','BAD.INDICATOR']
expected = {'NY.GDP.PCAP.CD': {('Canada', '2003'): 28026.006013044702, ('Mexico', '2003'): 6601.0420648056606, ('Canada', '2004'): 31829.522562759001, ('Kosovo', '2003'): 1969.56271307405, ('Mexico', '2004'): 7042.0247834044303, ('United States', '2004'): 41928.886136479705, ('United States', '2003'): 39682.472247320402, ('Kosovo', '2004'): 2135.3328465238301}}
expected = pandas.DataFrame(expected)
#Round, to ignore revisions to data.
expected = pandas.np.round(expected,decimals=-3)
expected.sort(inplace=True)
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
result.sort(inplace=True)
#Round, to ignore revisions to data.
result = pandas.np.round(result,decimals=-3)
expected.index = result.index
assert_frame_equal(result, pandas.DataFrame(expected))
@slow
@network
def test_wdi_download_w_retired_indicator(self):
cntry_codes = ['CA', 'MX', 'US']
# Despite showing up in the search feature, and being listed online,
# the api calls to GDPPCKD don't work in their own query builder, nor
# pandas module. GDPPCKD used to be a common symbol.
# This test is written to ensure that error messages to pandas users
# continue to make sense, rather than a user getting some missing
# key error, cause their JSON message format changed. If
# World bank ever finishes the deprecation of this symbol,
# this nose test should still pass.
inds = ['GDPPCKD']
try:
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
# If for some reason result actually ever has data, it's cause WB
# fixed the issue with this ticker. Find another bad one.
except ValueError as e:
raise nose.SkipTest("No indicators returned data: {0}".format(e))
# if it ever gets here, it means WB unretired the indicator.
# even if they dropped it completely, it would still get caught above
# or the WB API changed somehow in a really unexpected way.
if len(result) > 0:
raise nose.SkipTest("Invalid results")
@slow
@network
def test_wdi_download_w_crash_inducing_countrycode(self):
cntry_codes = ['CA', 'MX', 'US', 'XXX']
inds = ['NY.GDP.PCAP.CD']
try:
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
except ValueError as e:
raise nose.SkipTest("No indicators returned data: {0}".format(e))
# if it ever gets here, it means the country code XXX got used by WB
# or the WB API changed somehow in a really unexpected way.
if len(result) > 0:
raise nose.SkipTest("Invalid results")
@slow
@network
def test_wdi_get_countries(self):
result = | get_countries() | pandas.io.wb.get_countries |
#
# Copyright (C) 2021 The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import date
import pandas as pd
import pytest
from delta_sharing.delta_sharing import SharingClient, load_as_pandas, load_as_spark, _parse_url
from delta_sharing.protocol import Schema, Share, Table
from delta_sharing.tests.conftest import ENABLE_INTEGRATION, SKIP_MESSAGE
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_shares(sharing_client: SharingClient):
shares = sharing_client.list_shares()
assert shares == [
Share(name="share1"),
Share(name="share2"),
Share(name="share3"),
Share(name="share4"),
Share(name="share5"),
Share(name="share6"),
Share(name="share7"),
Share(name="share_azure"),
]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_schemas(sharing_client: SharingClient):
schemas = sharing_client.list_schemas(Share(name="share1"))
assert schemas == [Schema(name="default", share="share1")]
schemas = sharing_client.list_schemas(Share(name="share2"))
assert schemas == [Schema(name="default", share="share2")]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_tables(sharing_client: SharingClient):
tables = sharing_client.list_tables(Schema(name="default", share="share1"))
assert tables == [
Table(name="table1", share="share1", schema="default"),
Table(name="table3", share="share1", schema="default"),
Table(name="table7", share="share1", schema="default"),
]
tables = sharing_client.list_tables(Schema(name="default", share="share2"))
assert tables == [Table(name="table2", share="share2", schema="default")]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
def test_list_all_tables(sharing_client: SharingClient):
tables = sharing_client.list_all_tables()
assert tables == [
Table(name="table1", share="share1", schema="default"),
Table(name="table3", share="share1", schema="default"),
Table(name="table7", share="share1", schema="default"),
Table(name="table2", share="share2", schema="default"),
Table(name="table4", share="share3", schema="default"),
Table(name="table5", share="share3", schema="default"),
Table(name="test_gzip", share="share4", schema="default"),
Table(name="table8", share="share7", schema="schema1"),
Table(name="table9", share="share7", schema="schema2"),
Table(name="table_wasb", share="share_azure", schema="default"),
Table(name="table_abfs", share="share_azure", schema="default"),
]
@pytest.mark.skipif(not ENABLE_INTEGRATION, reason=SKIP_MESSAGE)
@pytest.mark.parametrize(
"fragments,expected",
[
pytest.param(
"share1.default.table1",
pd.DataFrame(
{
"eventTime": [
pd.Timestamp("2021-04-28 06:32:22.421"),
pd.Timestamp("2021-04-28 06:32:02.070"),
],
"date": [date(2021, 4, 28), date(2021, 4, 28)],
}
),
id="non partitioned",
),
pytest.param(
"share2.default.table2",
pd.DataFrame(
{
"eventTime": [
pd.Timestamp("2021-04-28 23:33:57.955"),
pd.Timestamp("2021-04-28 23:33:48.719"),
],
"date": [date(2021, 4, 28), date(2021, 4, 28)],
}
),
id="partitioned",
),
pytest.param(
"share1.default.table3",
pd.DataFrame(
{
"eventTime": [
pd.Timestamp("2021-04-28 23:36:51.945"),
pd.Timestamp("2021-04-28 23:36:47.599"),
pd.Timestamp("2021-04-28 23:35:53.156"),
],
"date": [date(2021, 4, 28), date(2021, 4, 28), date(2021, 4, 28)],
"type": ["bar", "foo", None],
}
),
id="partitioned and different schemas",
),
pytest.param(
"share3.default.table4",
pd.DataFrame(
{
"type": [None, None],
"eventTime": [
pd.Timestamp("2021-04-28 23:33:57.955"),
pd.Timestamp("2021-04-28 23:33:48.719"),
],
"date": [date(2021, 4, 28), date(2021, 4, 28)],
}
),
id="table column order is not the same as parquet files",
),
pytest.param(
"share4.default.test_gzip",
pd.DataFrame({"a": [True], "b": pd.Series([1], dtype="int32"), "c": ["Hi"]}),
id="table column order is not the same as parquet files",
),
pytest.param(
"share_azure.default.table_wasb",
pd.DataFrame({"c1": ["foo bar"], "c2": ["foo bar"],}),
id="Azure Blob Storage",
),
pytest.param(
"share_azure.default.table_abfs",
pd.DataFrame({"c1": ["foo bar"], "c2": ["foo bar"],}),
id="Azure Data Lake Storage Gen2",
),
],
)
def test_load(profile_path: str, fragments: str, expected: pd.DataFrame):
pdf = load_as_pandas(f"{profile_path}#{fragments}")
| pd.testing.assert_frame_equal(pdf, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import json
import os
from typing import Optional, Union, Iterator, List
from functools import partial
import pystow
import pandas as pd
from tqdm.auto import tqdm
from prodec import Descriptor, Transform
from .utils.IO import locate_file, process_data_version, TypeDecoder
def read_papyrus(is3d: bool = False, version: str = 'latest', chunksize: Optional[int] = None, source_path: Optional[str] = None) -> Union[
Iterator[pd.DataFrame], pd.DataFrame]:
"""Read the Papyrus dataset.
:param is3d: whether to consider stereochemistry or not (default: False)
:param version: version of the dataset to be read
:param chunksize: number of lines per chunk. To read without chunks, set to None
:param source_path: folder containing the bioactivity dataset (default: pystow's home folder)
:return: the Papyrus activity dataset
"""
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
version = process_data_version(version=version, root_folder=source_path)
source_path = pystow.module('papyrus', version)
# Load data types
dtype_file = source_path.join(name='data_types.json').as_posix()
with open(dtype_file, 'r') as jsonfile:
dtypes = json.load(jsonfile, cls=TypeDecoder)['papyrus']
# Find the file
filenames = locate_file(source_path.base.as_posix(),
f'*.*_combined_set_with{"out" if not is3d else ""}_stereochemistry.tsv*')
return pd.read_csv(filenames[0], sep='\t', chunksize=chunksize, dtype=dtypes, low_memory=True)
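# Example usage (a sketch; assumes the Papyrus files have already been downloaded under pystow's root):
#   data = read_papyrus(is3d=False, version='latest', chunksize=None)
#   for chunk in read_papyrus(chunksize=1_000_000):
#       ...  # process each chunk lazily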
def read_protein_set(source_path: Optional[str] = None, version: str = 'latest') -> pd.DataFrame:
"""Read the protein targets of the Papyrus dataset.
:param source_path: folder containing the molecular descriptor datasets
:param version: version of the dataset to be read
:return: the set of protein targets in the Papyrus dataset
"""
version = process_data_version(version=version, root_folder=source_path)
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
source_path = pystow.module('papyrus', version)
# Find the file
filenames = locate_file(source_path.base.as_posix(), f'*.*_combined_set_protein_targets.tsv*')
return pd.read_csv(filenames[0], sep='\t', keep_default_na=False)
def read_molecular_descriptors(desc_type: str = 'mold2', is3d: bool = False,
version: str = 'latest', chunksize: Optional[int] = None,
source_path: Optional[str] = None):
"""Get molecular descriptors
:param desc_type: type of descriptor {'mold2', 'mordred', 'cddd', 'fingerprint', 'all'}
:param is3d: whether to load descriptors of the dataset containing stereochemistry
:param version: version of the dataset to be read
:param chunksize: number of lines per chunk. To read without chunks, set to None
:param source_path: folder containing the molecular descriptor datasets
:return: the dataframe of molecular descriptors
"""
if desc_type not in ['mold2', 'mordred', 'cddd', 'fingerprint', 'moe', 'all']:
raise ValueError("descriptor type not supported")
# Determine default paths
if source_path is not None:
os.environ['PYSTOW_HOME'] = os.path.abspath(source_path)
version = process_data_version(version=version, root_folder=source_path)
source_path = pystow.module('papyrus', version)
# Load data types
dtype_file = source_path.join(name='data_types.json').as_posix()
with open(dtype_file, 'r') as jsonfile:
dtypes = json.load(jsonfile, cls=TypeDecoder)
# Find the files
if desc_type in ['mold2', 'all']:
mold2_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_mold2.tsv*')
if desc_type in ['mordred', 'all']:  # separate `if` (not `elif`) so that 'all' locates every descriptor file
mordd_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_mordred{3 if is3d else 2}D.tsv*')
if desc_type in ['cddd', 'all']:
cddds_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_CDDDs.tsv*')
if desc_type in ['fingerprint', 'all']:
molfp_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_{"E3FP" if is3d else "ECFP6"}.tsv*')
if desc_type in ['moe', 'all']:
moe_files = locate_file(source_path.join('descriptors').as_posix(),
f'*.*_combined_{3 if is3d else 2}D_moldescs_MOE.tsv*')
if desc_type == 'mold2':
return pd.read_csv(mold2_files[0], sep='\t', dtype=dtypes['mold2'], low_memory=True, chunksize=chunksize)
elif desc_type == 'mordred':
return pd.read_csv(mordd_files[0], sep='\t', dtype=dtypes[f'mordred_{3 if is3d else 2}D'], low_memory=True,
chunksize=chunksize)
elif desc_type == 'cddd':
return pd.read_csv(cddds_files[0], sep='\t', dtype=dtypes['CDDD'], low_memory=True, chunksize=chunksize)
elif desc_type == 'fingerprint':
return pd.read_csv(molfp_files[0], sep='\t', dtype=dtypes[f'{"E3FP" if is3d else "ECFP6"}'], low_memory=True,
chunksize=chunksize)
elif desc_type == 'moe':
return pd.read_csv(moe_files[0], sep='\t', low_memory=True, chunksize=chunksize)
elif desc_type == 'all':
mold2 = pd.read_csv(mold2_files[0], sep='\t', dtype=dtypes['mold2'], low_memory=True, chunksize=chunksize)
mordd = pd.read_csv(mordd_files[0], sep='\t', dtype=dtypes[f'mordred_{3 if is3d else 2}D'], low_memory=True,
chunksize=chunksize)
cddds = | pd.read_csv(cddds_files[0], sep='\t', dtype=dtypes['CDDD'], low_memory=True, chunksize=chunksize) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 15:03:54 2018
@author: yusu
"""
from keras.layers import Input, Dense, Flatten, Dropout, Reshape, Concatenate
from keras.layers import BatchNormalization, Activation, Conv2D, Conv2DTranspose,UpSampling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
from keras.optimizers import Adam
from keras.datasets import cifar10
import keras.backend as K
from keras.preprocessing import image
import matplotlib.pyplot as plt
import sys
import numpy as np
import os
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
os.makedirs('images', exist_ok=True)
matplotlib.interactive(True)
channels = 1
img_size = 28
img_w = img_h = img_size
img_shape = (img_size, img_size, channels)
n_epochs = 200
classes = ['saxophone',
'raccoon',
'piano',
'panda',
'leg',
'headphones',
'ceiling_fan',
'bed',
'basket',
'aircraft_carrier']
# Generator
def get_generator(input_layer, condition_layer):
depth = 64
p = 0.4
merged_input = Concatenate()([input_layer, condition_layer])
dense1 = Dense(7*7*64)(merged_input)
dense1 = BatchNormalization(axis=-1,momentum=0.9)(dense1)
dense1 = Activation(activation='relu')(dense1)
dense1 = Reshape((7,7,64))(dense1)
dense1 = Dropout(p)(dense1)
# Convolutional layers
conv1 = UpSampling2D()(dense1)
conv1 = Conv2DTranspose(int(depth/2), kernel_size=5, padding='same', activation=None,)(conv1)
conv1 = BatchNormalization(axis=-1,momentum=0.9)(conv1)
conv1 = Activation(activation='relu')(conv1)
conv2 = UpSampling2D()(conv1)
conv2 = Conv2DTranspose(int(depth/4), kernel_size=5, padding='same', activation=None,)(conv2)
conv2 = BatchNormalization(axis=-1,momentum=0.9)(conv2)
conv2 = Activation(activation='relu')(conv2)
#conv3 = UpSampling2D()(conv2)
conv3 = Conv2DTranspose(int(depth/8), kernel_size=5, padding='same', activation=None,)(conv2)
conv3 = BatchNormalization(axis=-1,momentum=0.9)(conv3)
conv3 = Activation(activation='relu')(conv3)
# Define output layers
output = Conv2D(1, kernel_size=5, strides=1, padding="same")(conv3)
out = Activation("sigmoid")(output)
model = Model(inputs=[input_layer, condition_layer], outputs=out)
model.summary()
return model,out
# discriminator
def get_discriminator(input_layer, condition_layer,depth = 64,p = 0.4):
#merged_input = Concatenate()([input_layer, condition_layer])
conv1 = Conv2D(depth*1, 5, strides=2, padding='same', activation='relu')(input_layer)
conv1 = Dropout(p)(conv1)
conv2 = Conv2D(depth*2, 5, strides=2, padding='same', activation='relu')(conv1)
conv2 = Dropout(p)(conv2)
conv3 = Conv2D(depth*4, 5, strides=2, padding='same', activation='relu')(conv2)
conv3 = Dropout(p)(conv3)
conv4 = Conv2D(depth*8, 5, strides=1, padding='same', activation='relu')(conv3)
conv4 = Flatten()(Dropout(p)(conv4))
merged_layer = Concatenate()([conv4, condition_layer])
output = Dense(512, activation='relu')(merged_layer)
#hid = Dropout(0.4)(hid)
out = Dense(1, activation='sigmoid')(output)
model = Model(inputs=[input_layer, condition_layer], outputs=out)
model.summary()
return model,out
def one_hot_encode(y):
z = np.zeros((len(y), 10))
idx = np.arange(len(y))
#print(type(idx[0]))
#for i in range(len(y)):
# z[i,y[i]] = 1
z[idx,y] = 1
return z
def generate_noise(n_samples, noise_dim):
X = np.random.normal(0, 1, size=(n_samples, noise_dim))
return X
def generate_random_labels(n):
y = np.random.choice(10, n)
y = one_hot_encode(y)
#print(y.shape)
return y
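# Illustrative sanity checks for the helpers above (added for clarity; they are
# cheap and do not affect training): one_hot_encode places a single 1 in the
# column given by each label, and the two generators return (n, 100) noise and
# (n, 10) one-hot label batches.
_demo = one_hot_encode(np.array([2, 0]))
assert _demo.shape == (2, 10) and _demo[0, 2] == 1 and _demo[1, 0] == 1
assert generate_noise(4, 100).shape == (4, 100)
assert generate_random_labels(4).shape == (4, 10)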
img_input = Input(shape=(28,28,1))
disc_condition_input = Input(shape=(10,))
discriminator, disc_out = get_discriminator(img_input, disc_condition_input)
discriminator.compile(optimizer=Adam(0.0002, 0.5), loss='binary_crossentropy', metrics=['accuracy'])
#discriminator.trainable = False
noise_input = Input(shape=(100,))
gen_condition_input = Input(shape=(10,))
generator, gen_out = get_generator(noise_input, gen_condition_input)
gan_input = Input(shape=(100,))
x = generator([gan_input, gen_condition_input])
gan_out = discriminator([x, disc_condition_input])
AM = Model(inputs=[gan_input, gen_condition_input, disc_condition_input], outputs=gan_out)
AM.summary()
AM.compile(optimizer=Adam(0.0002, 0.5), loss='binary_crossentropy')
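# Illustrative single training step (a sketch only; no training loop appears in
# this excerpt). `real_imgs`/`real_labels` are assumed to be a batch drawn from
# the data built by get_all_classes() below, and batch_size is an assumed
# hyperparameter. Note that many cGAN setups also set
# discriminator.trainable = False before compiling the combined model AM.
def train_step(real_imgs, real_labels, batch_size=32):
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    # 1) update the discriminator on real and generated images
    noise = generate_noise(batch_size, 100)
    gen_labels = generate_random_labels(batch_size)
    fake_imgs = generator.predict([noise, gen_labels])
    d_loss_real = discriminator.train_on_batch([real_imgs, real_labels], valid)
    d_loss_fake = discriminator.train_on_batch([fake_imgs, gen_labels], fake)
    # 2) update the generator through the combined model, trying to fool D
    noise = generate_noise(batch_size, 100)
    sampled_labels = generate_random_labels(batch_size)
    g_loss = AM.train_on_batch([noise, sampled_labels, sampled_labels], valid)
    return d_loss_real, d_loss_fake, g_loss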
def get_all_classes():
df = pd.DataFrame([], columns=['Image', 'Label'])
for i, label in enumerate(classes):
data = np.load('./data/%s.npy' % label) / 255
data = np.reshape(data, [data.shape[0], img_size, img_size, 1])
        df2 = pd.DataFrame([(row, i) for row in data], columns=['Image', 'Label'])
import pandas as pd
import mysql.connector
import os.path
import shutil
import datetime
path = desktop = os.path.normpath(os.path.expanduser("~/Desktop"))
try:
os.mkdir(path)
except OSError:
    print ("Directory %s already exists" % path)
else:
print ("Successfully created the directory %s " % path)
now=datetime.datetime.now()
month=now.month
savepath= 'C:/user'
db = mysql.connector.connect(user='root', password='<PASSWORD>', host='localhost', port=3306 , database= 'face_recognization' )
mycursor = db.cursor()
mycursor.execute("SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'studentrecord' ")
data=mycursor.fetchall()
df=pd.read_json('data.json')
df1 = pd.DataFrame(data)
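# Illustrative continuation (a sketch; the original script is truncated here).
# The column-name frame and the JSON records could be written to one workbook,
# e.g. using the `path` and `month` values defined above:
# out_file = os.path.join(path, 'attendance_%s.xlsx' % month)
# with pd.ExcelWriter(out_file) as writer:
#     df.to_excel(writer, sheet_name='records', index=False)
#     df1.to_excel(writer, sheet_name='columns', index=False)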
###############
#
# Transform R to Python Copyright (c) 2016 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import arviz as az
file_beer_sales_4 = pandas.read_csv('3-7-1-beer-sales-4.csv')
print(file_beer_sales_4.head())
sns.scatterplot(
x='temperature',
y='sales',
hue='weather',
data=file_beer_sales_4
)
plt.show()
file_beer_sales_4_d = pandas.get_dummies(file_beer_sales_4)
print(file_beer_sales_4_d.head())
sample_num = len(file_beer_sales_4_d['sales'])
sales = file_beer_sales_4_d['sales']
weather_rainy = file_beer_sales_4_d['weather_rainy']
weather_sunny = file_beer_sales_4_d['weather_sunny']
temperature = file_beer_sales_4_d['temperature']
# for pred
# cloudy, rainy, sunny
weather_rainy_pred = [0, 1, 0]
weather_sunny_pred = [0, 0, 1]
temperature_pred = range(11,31)
stan_data = {
'N': sample_num,
'sales': sales,
'weather_rainy': weather_rainy,
'weather_sunny': weather_sunny,
'temperature': temperature,
'N_pred_w': 3,
'weather_rainy_pred': weather_rainy_pred,
'weather_sunny_pred': weather_sunny_pred,
'N_pred_t': len(temperature_pred),
'temperature_pred': temperature_pred
}
if os.path.exists('3-7-1-cat-lm.pkl'):
sm = pickle.load(open('3-7-1-cat-lm.pkl', 'rb'))
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='3-7-1-cat-lm.stan')
mcmc_result = sm.sampling(
data=stan_data,
seed=1,
chains=4,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
mcmc_sample = mcmc_result.extract()
sales_pred = mcmc_sample['sales_pred']
print(type(sales_pred))
print(sales_pred.shape)
print(sales_pred.T.shape)
label_one = ['cloudy', 'rainy', 'sunny']
label_two = np.arange(11,31)
cols = pandas.MultiIndex.from_product([label_one, label_two])
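# Illustrative continuation (a sketch, not the original code): assuming
# `sales_pred` is laid out with weather varying slowest and temperature fastest
# (one column per (weather, temperature) pair, 3 * 20 = 60 in total), the
# posterior draws can be wrapped with the MultiIndex built above and summarised:
# df_pred = pandas.DataFrame(sales_pred, columns=cols)
# print(df_pred.quantile([0.025, 0.5, 0.975]).T)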
import sys
import joblib
import os.path as op
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from joblib import Parallel, delayed
sys.path.append(op.abspath(op.dirname(op.dirname(__file__))))
from data_io import DataLoader
from model import cross_val_predict_and_score
from metrics import roc_auc_score_per_class
def classify_subjects_parallel(sub, subs, feature_spaces, model, cv):
""" Helper function to parallelize analysis across subjects. """
scores, coefs = [], dict()
for i, fs in enumerate(feature_spaces):
if not isinstance(fs, (tuple, list)):
fs = (fs,)
fs_name = '+'.join(fs)
dl = DataLoader(sub=sub, log_level=30)
dl.load_y(strategy_doubles='hard')
dl.load_X(feature_set=fs, n_comp=100)
X_val, y_val = dl.return_Xy()
other_X, other_y = [], []
other_subs = [s for s in subs if s != sub]
for other_sub in other_subs:
dl = DataLoader(sub=other_sub, log_level=30)
dl.load_y(strategy_doubles='hard')
dl.load_X(feature_set=fs, n_comp=100)
this_X, this_y = dl.return_Xy()
other_X.append(this_X)
other_y.append(this_y)
X = pd.concat(other_X, axis=0)
y = pd.concat(other_y, axis=0)
scores_, coefs_, model_ = cross_val_predict_and_score(
estimator=model,
X=X, y=y,
cv=cv,
scoring=roc_auc_score_per_class,
X_val=X_val,
y_val=y_val,
per_class=True,
return_model=True
)
joblib.dump(model_, f'models/sub-{sub}_type-between_fs-{fs_name}_model.jl')
dl.log.warning(f"sub-{sub} scores: {np.round(scores_, 2)} (fs = {fs_name})")
scores_df = pd.DataFrame(scores_, columns=['score'])
scores_df['feature_set'] = fs_name
scores_df['emotion'] = dl.le.classes_
scores_df['sub'] = sub
scores.append(scores_df)
coefs_df = pd.DataFrame(data=coefs_, columns=X.columns)
coefs_df['feature_set'] = fs_name
coefs_df['emotion'] = dl.le.classes_
coefs_df['sub'] = sub
coefs[fs_name] = coefs_df
    scores_df = pd.concat(scores, axis=0)
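# Illustrative driver (a sketch; subject IDs, feature spaces, model and cv are
# assumptions): the per-subject analysis above is designed to be fanned out with
# the joblib primitives imported at the top of this file, e.g.
# results = Parallel(n_jobs=4)(
#     delayed(classify_subjects_parallel)(sub, subs, feature_spaces, model, cv)
#     for sub in subs
# )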
from pathlib import Path
import json
import logging
import requests
import yaml
from functools import lru_cache
import numpy as np
import pandas as pd
from collections.abc import Mapping
from pyam.core import IamDataFrame
from pyam.utils import META_IDX, IAMC_IDX, isstr, pattern_match
from pyam.logging import deprecation_warning
logger = logging.getLogger(__name__)
# set requests-logger to WARNING only
logging.getLogger('requests').setLevel(logging.WARNING)
_AUTH_URL = 'https://db1.ene.iiasa.ac.at/EneAuth/config/v1'
_CITE_MSG = """
You are connected to the {} scenario explorer hosted by IIASA.
If you use this data in any published format, please cite the
data as provided in the explorer guidelines: {}
""".replace('\n', '')
# path to local configuration settings
DEFAULT_IIASA_CREDS = Path('~').expanduser() / '.local' / 'pyam' / 'iiasa.yaml'
def set_config(user, password, file=None):
"""Save username and password for IIASA API connection to file"""
file = Path(file) if file is not None else DEFAULT_IIASA_CREDS
if not file.parent.exists():
file.parent.mkdir(parents=True)
with open(file, mode='w') as f:
logger.info(f'Setting IIASA-connection configuration file: {file}')
yaml.dump(dict(username=user, password=password), f)
def _get_config(file=None):
"""Read username and password for IIASA API connection from file"""
file = Path(file) if file is not None else DEFAULT_IIASA_CREDS
if file.exists():
with open(file, 'r') as stream:
return yaml.safe_load(stream)
def _check_response(r, msg='Trouble with request', error=RuntimeError):
if not r.ok:
raise error('{}: {}'.format(msg, str(r.text)))
def _get_token(creds, base_url):
"""Parse credentials and get token from IIASA authentication service"""
plaintextcreds = True
# try reading default config or parse file
if creds is None:
creds = _get_config()
plaintextcreds = False
elif isinstance(creds, Path) or isstr(creds):
_creds = _get_config(creds)
if _creds is None:
logger.error(f'Could not read credentials from `{creds}`')
creds = _creds
plaintextcreds = False
# if (still) no creds, get anonymous auth and return
if creds is None:
url = '/'.join([base_url, 'anonym'])
r = requests.get(url)
_check_response(r, 'Could not get anonymous token')
return r.json(), None
# parse creds, write warning
if isinstance(creds, Mapping):
user, pw = creds['username'], creds['password']
else:
user, pw = creds
if plaintextcreds:
logger.warning('You provided credentials in plain text. DO NOT save '
'these in a repository or otherwise post them online')
deprecation_warning('Please use `pyam.iiasa.set_config(<user>, <pwd>)`'
' to store your credentials in a file!',
'Providing credentials in plain text')
# get user token
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
data = {'username': user, 'password': pw}
url = '/'.join([base_url, 'login'])
r = requests.post(url, headers=headers, data=json.dumps(data))
_check_response(r, 'Login failed for user: {}'.format(user))
return r.json(), user
class Connection(object):
"""A class to facilitate querying an IIASA Scenario Explorer database API
Parameters
----------
name : str, optional
The name of a database API.
See :attr:`pyam.iiasa.Connection.valid_connections` for a list
of available APIs.
creds : str, :class:`pathlib.Path`, list-like, or dict, optional
By default, this function will (try to) read user credentials which
were set using :meth:`pyam.iiasa.set_config(<user>, <password>)`.
Alternatively, you can provide a path to a yaml file
with entries of 'username' and 'password'.
base_url : str, custom authentication server URL
Notes
-----
Providing credentials as an ordered container (tuple, list, etc.)
or as a dictionary with keys `user` and `password` is (still) supported
for backwards compatibility. However, this option is NOT RECOMMENDED
and will be deprecated in future releases of pyam.
"""
def __init__(self, name=None, creds=None, auth_url=_AUTH_URL):
self._auth_url = auth_url
self._token, self._user = _get_token(creds, base_url=self._auth_url)
# connect if provided a name
self._connected = None
if name:
self.connect(name)
if self._user:
logger.info(f'You are connected as user `{self._user}`')
else:
logger.info('You are connected as an anonymous user')
@property
@lru_cache()
def _connection_map(self):
url = '/'.join([self._auth_url, 'applications'])
headers = {'Authorization': 'Bearer {}'.format(self._token)}
r = requests.get(url, headers=headers)
_check_response(r, 'Could not get valid connection list')
aliases = set()
conn_map = {}
for x in r.json():
if 'config' in x:
env = next((r['value'] for r in x['config']
if r['path'] == 'env'), None)
name = x['name']
if env is not None:
if env in aliases:
logger.warning('Duplicate instance alias {}'
.format(env))
conn_map[name] = name
first_duplicate = conn_map.pop(env)
conn_map[first_duplicate] = first_duplicate
else:
conn_map[env] = name
aliases.add(env)
else:
conn_map[name] = name
return conn_map
@property
@lru_cache()
def valid_connections(self):
"""Return available resources (database API connections)"""
return list(self._connection_map.keys())
def connect(self, name):
"""Connect to a specific resource (database API)"""
if name in self._connection_map:
name = self._connection_map[name]
valid = self._connection_map.values()
if len(valid) == 0:
raise RuntimeError(
'No valid connections found for the provided credentials.'
)
if name not in valid:
msg = """
{} not recognized as a valid connection name.
Choose from one of the supported connections for your user: {}.
"""
raise ValueError(msg.format(name, self._connection_map.keys()))
url = '/'.join([self._auth_url, 'applications', name, 'config'])
headers = {'Authorization': 'Bearer {}'.format(self._token)}
r = requests.get(url, headers=headers)
_check_response(r, 'Could not get application information')
response = r.json()
idxs = {x['path']: i for i, x in enumerate(response)}
self._auth_url = response[idxs['baseUrl']]['value']
# TODO: proper citation (as metadata) instead of link to the about page
if 'uiUrl' in idxs:
about = '/'.join([response[idxs['uiUrl']]['value'], '#', 'about'])
logger.info(_CITE_MSG.format(name, about))
# TODO: use API "nice-name"
self._connected = name
@property
def current_connection(self):
"""Currently connected resource (database API connection)"""
return self._connected
def index(self, default=True):
"""Return the index of models and scenarios in the connected resource
Parameters
----------
default : bool, optional
If `True`, return *only* the default version of a model/scenario.
Any model/scenario without a default version is omitted.
If `False`, returns all versions.
"""
cols = ['version'] if default else ['version', 'is_default']
return self._query_index(default)[META_IDX + cols].set_index(META_IDX)
def scenario_list(self, default=True):
"""Deprecated, use :meth:`Connection.index`"""
deprecation_warning('Use `Connection.index()` instead.')
return self._query_index(default)
@lru_cache()
def _query_index(self, default=True):
# TODO merge this function with `meta()`
default = 'true' if default else 'false'
add_url = 'runs?getOnlyDefaultRuns={}'
url = '/'.join([self._auth_url, add_url.format(default)])
headers = {'Authorization': 'Bearer {}'.format(self._token)}
r = requests.get(url, headers=headers)
_check_response(r, 'Could not retrieve the resource index')
        return pd.read_json(r.content, orient='records')
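# Illustrative usage of Connection (a sketch; the resource name 'iamc15' is only
# an example -- valid names depend on the user's credentials):
# conn = Connection('iamc15')
# conn.valid_connections      # list the database APIs available to this user
# conn.index()                # model/scenario index of the connected resource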
import pandas as pd
import numpy as np
import pytest
from conftest import DATA_DIR, assert_series_equal
from numpy.testing import assert_allclose
from pvlib import temperature
@pytest.fixture
def sapm_default():
return temperature.TEMPERATURE_MODEL_PARAMETERS['sapm'][
'open_rack_glass_glass']
def test_sapm_cell(sapm_default):
default = temperature.sapm_cell(900, 20, 5, sapm_default['a'],
sapm_default['b'], sapm_default['deltaT'])
assert_allclose(default, 43.509, 3)
def test_sapm_module(sapm_default):
default = temperature.sapm_module(900, 20, 5, sapm_default['a'],
sapm_default['b'])
assert_allclose(default, 40.809, 3)
def test_sapm_cell_from_module(sapm_default):
default = temperature.sapm_cell_from_module(50, 900,
sapm_default['deltaT'])
assert_allclose(default, 50 + 900 / 1000 * sapm_default['deltaT'])
def test_sapm_ndarray(sapm_default):
temps = np.array([0, 10, 5])
irrads = np.array([0, 500, 0])
winds = np.array([10, 5, 0])
cell_temps = temperature.sapm_cell(irrads, temps, winds, sapm_default['a'],
sapm_default['b'],
sapm_default['deltaT'])
module_temps = temperature.sapm_module(irrads, temps, winds,
sapm_default['a'],
sapm_default['b'])
expected_cell = np.array([0., 23.06066166, 5.])
expected_module = np.array([0., 21.56066166, 5.])
assert_allclose(expected_cell, cell_temps, 3)
assert_allclose(expected_module, module_temps, 3)
def test_sapm_series(sapm_default):
    times = pd.date_range(start='2015-01-01', end='2015-01-02', freq='12H')
# Created: July 22
# Utility function library
# July 22: added a function for saving the model state
import os
import numpy as np
import torch
import time
import pandas as pd
class Recorder(object):
def __init__(self, method_version, path=r'../Saved_resultes/'):
self.date = time.strftime('%Y%m%d', time.localtime())
self.save_dir = path + self.date + '_' + method_version
self.check_dir()
def check_dir(self):
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def recode_state(self, ite_num, net_parameter, optimizer_parameter, loss, loss_all, loss_scheduler_parameter):
state = {'net': net_parameter,
'optimizer': optimizer_parameter,
'loss': loss,
'loss_all': loss_all,
'scheduler': loss_scheduler_parameter
}
save_file = r'i_' + str(ite_num) + '_full_net_state.pkl'
torch.save(state, os.path.join(self.save_dir, save_file))
def general_record(self, ite_num, name, info):
save_file = r'i_' + str(ite_num) + str(name) + '.pkl'
torch.save(info, os.path.join(self.save_dir, save_file))
def save_test(self):
        x = pd.DataFrame()
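# Illustrative usage of Recorder (a sketch; `net`, `optimizer` and `scheduler`
# are assumed to be torch objects whose state_dicts match the recode_state()
# signature above):
# recorder = Recorder(method_version='baseline')
# recorder.recode_state(ite_num=1000,
#                       net_parameter=net.state_dict(),
#                       optimizer_parameter=optimizer.state_dict(),
#                       loss=loss, loss_all=loss_all,
#                       loss_scheduler_parameter=scheduler.state_dict())
# recorder.general_record(ite_num=1000, name='_metrics', info={'psnr': 30.2})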
import warnings
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
import pandas as pd
class MeteoDrought:
"""Calculate statistical measures of meteorological drought from
time series
Parameters
----------
prc : pd.Series
time series with precipitation data
evp : pd.Series
time series with evaporation data
Examples
--------
dro = aq.MeteoDrought(prc,evp)
drday = dro.daydrought()
drsum = dro.summersum()
drcum = dro.summercum()
"""
YEARSTART = 'YEAR-01-01'
YEAREND = 'YEAR-12-31'
SUMMERSTART = 'YEAR-04-01'
SUMMEREND = 'YEAR-09-30'
SUMMERDAYS = 183
def __init__(self,prc=None,evp=None,stn=None):
self._prc = prc
self._evp = evp
self._rch = self.recharge()
self._rchsmr = self.summer_recharge()
if stn is None:
stn = 'unknown'
self._stn = stn
def __repr__(self):
return (f'{self.__class__.__name__} (n={len(self._rchsmr)})')
def recharge(self):
"""Return time series of recharge for all available days
Calculated as difference between precipitation and evaporation.
Leading and trailing NaN values are removed."""
self._rch = self._prc - self._evp
# remove leading and trailing NaNs
first = self._rch.first_valid_index()
last = self._rch.sort_index(ascending=False).first_valid_index()
self._rch = self._rch[first:last]
# remove first year if first date is after april 1st
if not self._rch.index[0].month<4:
firstyear = self._rch.index[0].year+1
firstdate = pd.to_datetime(
self.YEARSTART.replace('YEAR',str(firstyear)))
self._rch = self._rch[firstdate:]
        # remove last year if last date is before september 30th
if not self._rch.index[-1].month>9:
lastyear = self._rch.index[-1].year-1
lastdate = pd.to_datetime(
self.YEAREND.replace('YEAR',str(lastyear)))
self._rch = self._rch[:lastdate]
return self._rch
def summer_recharge(self):
"""Return table with array of daily recharges for each summer"""
# empty table
years = list(set(self._rch.index.year))
days = np.arange(0,183)
self._rchsmr = Series(index=years,dtype=object)
self._rchsmr.index.name = 'year'
# daily rechsrge for all years
for year,rch in self._rch.groupby(by=self._rch.index.year):
firstdate = self.SUMMERSTART.replace('YEAR',str(year))
lastdate = self.SUMMEREND.replace('YEAR',str(year))
rchsmr = self._rch[firstdate:lastdate].values
self._rchsmr[year] = rchsmr
return self._rchsmr
def _cumulative_drought(self,rchsmr):
"""Return daily values of cumulative drought for one summer
Parameters
----------
rchsmr : np.array
daily values of recharge between april 1st and september 30th
Returns
-------
numpy.array with daily cumulative drought
"""
daydr = -1*rchsmr
cumdr = np.zeros(len(daydr))
for i,val in enumerate(daydr):
if i==0:
if np.isnan(daydr[i]):
cumdr[i] = daydr[i]
elif daydr[i] > 0:
cumdr[i] = daydr[i]
else:
cumdr[i] = 0
else:
cumdr[i] = cumdr[i-1] + daydr[i]
if cumdr[i]<0:
cumdr[i]=0
return cumdr
def daydrought(self):
"""Return cumulative drought on daily basis for all years
Returns
-------
pd.DataFrame
Notes
-----
Cumulative meteorological drought is calculated between april 1st
and september 30th.
"""
years = list(set(self._rch.index.year))
days = np.arange(1,self.SUMMERDAYS+1)
self._daydrought = DataFrame(columns=years,index=days)
self._daydrought.index.name = 'daynr'
for year,rch in self._rchsmr.iteritems():
self._daydrought[year] = self._cumulative_drought(rch)
return self._daydrought
def summercum(self):
"""Return maximum cumulative drought for each year"""
return self.daydrought().max(axis=0)
def summersum(self):
"""Return sum of drought for all years"""
        self._summersum = Series(index=self._rchsmr.index, dtype=float)
"""
Module to work with CSV files of measurements of Cloud Condensation Nuclei
measurements from the CCN-100 probe
"A DMT single-column cloud condensation counter model measures the spectrum of
CCN concentration as a function of supersaturation continuously using
uninterrupted flow and a multichannel, optical particle counter that measures
the size of the activated droplet."
"""
import xarray as xr
import pandas as pd
from pathlib import Path
def _load_meta(fn, nlines=3):
meta = {}
with open(fn) as fh:
        for _ in range(nlines):
line = fh.readline()
key, value = line.split(",")
meta[key.strip().lower()] = value.strip()
return meta
def load_csv(fn, n_header_lines=3):
"""
Load CCN datafile with filename `fn` and return as xarray.Dataset
"""
meta = _load_meta(fn, nlines=n_header_lines)
df = pd.read_csv(fn, skiprows=n_header_lines)
# cleanup column names
df.columns = [s.strip().lower().replace(" ", "_") for s in df.columns]
# make times into datetimes
df["time"] = meta["date"] + "T" + df["time"] + "Z"
    df["time"] = pd.to_datetime(df["time"], format="%m/%d/%yT%H:%M:%SZ", utc=True)
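# Illustrative usage (a sketch; the file name is an assumption): per the
# docstring above, load_csv returns the CCN table as an xarray.Dataset, e.g.
# ds = load_csv(Path("ccn_measurements.csv"))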
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
parser.add_argument('--size',
type=int,
default=96,
                        help='Channel size. Default is 96.')
parser.add_argument('--lr',
type=float,
default=0.01,
help='Learning rate. Default is 0.01.')
parser.add_argument('--wd',
type=float,
default=0.01,
help='Regularization weight. Default is 0.01.')
parser.add_argument('--dropout',
type=float,
default=0.8,
                        help='Dropout probability. Default is 0.8.')
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating with SBMs created from labels? Default is False.')
parser.add_argument('--heads',
type=int,
default=4,
help='Attention heads. Default is 4.')
parser.add_argument('--attention_dropout',
type=float,
default=0.4,
help='Attention dropout for GAT. Default is 0.4.')
parser.add_argument('--dataset',
default="cora",
help='Dataset name. Default is cora.')
parser.add_argument('--model',
default="gcn",
help='Model name. Default is GCN.')
parser.add_argument('--splits',
type=int,
default=100,
help='Number of random train/validation/test splits. Default is 100.')
parser.add_argument('--runs',
type=int,
default=20,
help='Number of random initializations of the model. Default is 20.')
parser.add_argument('--conf_inits',
type=int,
default=10,
help='Number of configuration model runs. Default is 10.')
parser.add_argument('--sbm_inits',
type=int,
default=10,
help='Number of SBM runs. Default is 10.')
parser.add_argument('--directionality',
default='undirected',
help='Directionality: undirected/directed/reversed. Default is undirected.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
args = parser.parse_args()
return args
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads,attention_dropout,runs,splits,train_examples,val_examples, models=[MonoGAT],isDirected = False):
if isDirected:
models = [MonoGAT]
return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd, heads=heads, attention_dropout=attention_dropout,
models=models, num_runs=runs, num_splits=splits, test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoModel], isDirected=False):
if isDirected:
models = [MonoModel]
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoAPPNPModel]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoRGCN]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, runs, splits, train_examples, val_examples, isDirected):
if model == 'gat':
return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
elif model == 'rgcn' or model == 'rgcn2':
return eval_archs_rgcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
elif model == 'appnp':
return eval_archs_appnp(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
else:
return eval_archs_gcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
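# Illustrative call (a sketch; argument values mirror the argparse defaults
# defined above):
# df = eval_original(model='gcn', dataset_name='cora', directionality='undirected',
#                    size=96, dropout=0.8, lr=0.01, wd=0.01, heads=4,
#                    attention_dropout=0.4, splits=100, runs=20,
#                    train_examples=20, val_examples=30)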
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = dataset.x[torch.randperm(dataset.x.size()[0])]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# G = to_networkx(dataset)
# G = nx.DiGraph(G)
# node_communities = best_partition(nx.to_undirected(G))
# nx.set_node_attributes(G,node_communities,'label')
# # print(dataset.edge_index)
# old_edges = dataset.edge_index
# G = generate_modified_conf_model(G)
# # dir_path = f'data/graphs/cm_communities/{dataset_name}'
# # if not os.path.exists(dir_path):
# # os.mkdir(dir_path)
# # nx.write_edgelist(G, f'{dir_path}/{dataset_name}_cm_communities_{i}.cites')
# dataset.edge_index = torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)
# print((torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)-old_edges).abs().sum())
# print(dataset.edge_index)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, random_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
# So times, the description of the accounts have small differences for the
# same account in different periods, as punctuation. The purpose of the df_index
# is to keep only one description to each account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
# Due to the command line 'df = df.sort_values([('dt_fim_exerc'), ('value')],
# axis=1, ascending=False)'
# the columns are ordered by date descending, and value descending. The pupose
# here is to set the order as: date descending and value ascending
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
def prices(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="prices", start_period=start_period
)
prices = pd.read_sql(
f"""SELECT date, preult AS price
FROM prices
WHERE ticker = '{self.ticker}' AND date >= '{begin_period}'
ORDER BY date""",
conn,
index_col="date", parse_dates=['date']
)
return prices
def total_shares(self, start_period="all"):
"""
Support method for other methods of the Class
"""
begin_period = Ticker.get_begin_period(
self, function="total_shares", start_period=start_period
)
query = f"""SELECT date, number_shares AS on_shares
FROM prices
WHERE ticker = '{self.on_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_on = pd.read_sql(query, conn)
try:
query = f"""SELECT date, number_shares AS pn_shares
FROM prices
WHERE ticker = '{self.pn_ticker}' AND date >= '{begin_period}'
ORDER BY date"""
nshares_pn = pd.read_sql(query, conn)
shares = nshares_on.merge(nshares_pn, how="left")
shares["total_shares"] = shares["on_shares"] + \
shares["pn_shares"].fillna(0)
except:
shares = nshares_on.rename({"on_shares": "total_shares"}, axis=1)
shares.index = shares["date"]
shares.index = pd.to_datetime(shares.index)
return shares[["total_shares"]]
def net_income(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the net income information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="net_income", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_net_income
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Líquido das Operações Continuadas' OR ds_conta = 'Lucro/Prejuízo do Período')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Líquido das Operações Continuadas"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Lucro/Prejuízo do Período"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_net_income"] = df["ytd_net_income"] - \
df["ytd_net_income"].shift(1)
df["quarter_net_income"][df["fiscal_quarter"] == 1] = df["ytd_net_income"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_net_income"] = (
df["quarter_net_income"].rolling(window=4, min_periods=4).sum()
)
if quarter == False:
df = df.drop(["quarter_net_income"], axis=1)
if ytd == False:
df = df.drop(["ytd_net_income"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' Net Income (R$,000) ')
return df
def ebit(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebit information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebit", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, ds_conta, vl_conta AS ytd_ebit
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND (ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR ds_conta = 'Resultado Operacional')
ORDER BY dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df = df.drop(["ds_conta"], axis=1)
df["quarter_ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["quarter_ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_ebit"] = df["quarter_ebit"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebit"], axis=1)
if ytd == False:
df = df.drop(["ytd_ebit"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBIT (R$,000) ')
return df
def depre_amort(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the depreciationa and amortization information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="depre_amort", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_d_a
FROM dva
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND ds_conta = 'Depreciação, Amortização e Exaustão'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["quarter_d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
if ttm == True:
df["ttm_d_a"] = df["quarter_d_a"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_d_a"], axis=1)
if ytd == False:
df = df.drop(["ytd_d_a"], axis=1)
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
if plot:
_plots.bar_plot(df, self.ticker, self.grupo, bars=' D&A (R$,000)')
return df
def ebitda(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the ebitda information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="ebitda", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dre.dt_fim_exerc AS date,
dre.fiscal_quarter,
dre.ds_conta,
dre.vl_conta AS ytd_ebit,
dva.vl_conta AS ytd_d_a
FROM dre
LEFT JOIN dva ON (dre.dt_fim_exerc=dva.dt_fim_exerc AND dre.grupo_dfp=dva.grupo_dfp AND dre.cnpj=dva.cnpj)
WHERE dre.cnpj = '{self.cnpj}'
AND dre.grupo_dfp = '{self.grupo}'
AND dre.dt_fim_exerc >= '{begin_period.date()}'
AND (dre.ds_conta = 'Resultado Antes do Resultado Financeiro e dos Tributos' OR dre.ds_conta = 'Resultado Operacional')
AND dva.ds_conta = 'Depreciação, Amortização e Exaustão'
ORDER BY dre.dt_fim_exerc"""
income_statement = pd.read_sql(
query, conn, index_col="date", parse_dates=['date'])
df = income_statement[
income_statement["ds_conta"]
== "Resultado Antes do Resultado Financeiro e dos Tributos"
]
if len(df) == 0:
df = income_statement[
income_statement["ds_conta"] == "Resultado Operacional"
]
df["ebit"] = df["ytd_ebit"] - df["ytd_ebit"].shift(1)
df["ebit"][df["fiscal_quarter"] == 1] = df["ytd_ebit"][
df["fiscal_quarter"] == 1
]
df["d_a"] = df["ytd_d_a"] - df["ytd_d_a"].shift(1)
df["d_a"][df["fiscal_quarter"] ==
1] = df["ytd_d_a"][df["fiscal_quarter"] == 1]
df["quarter_ebitda"] = df["ebit"] - df["d_a"]
if ttm == True:
df["ttm_ebitda"] = df["quarter_ebitda"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_ebitda"], axis=1)
if ytd == True:
df["ytd_ebitda"] = df["ytd_ebit"] - df["ytd_d_a"]
df = df[df.index >= begin_period + pd.DateOffset(months=12)]
df = df.drop(
columns=["fiscal_quarter", "ds_conta",
"ytd_ebit", "ytd_d_a", "d_a", "ebit"]
)
if plot:
_plots.bar_plot(df, self.ticker, self.grupo,
bars=' EBITDA (R$,000) ')
return df
def revenue(self, quarter=True, ytd=True, ttm=True, start_period="all", plot=False):
"""
Creates a dataframe with the revenue information of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
plot: boolean
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="revenue", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc AS date, fiscal_quarter, vl_conta AS ytd_revenue
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
AND cd_conta = '3.01'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, index_col="date", parse_dates=['date'])
df["quarter_revenue"] = df["ytd_revenue"] - df["ytd_revenue"].shift(1)
df["quarter_revenue"][df["fiscal_quarter"] == 1] = df["ytd_revenue"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_revenue"] = df["quarter_revenue"].rolling(
window=4, min_periods=4).sum()
if quarter == False:
df = df.drop(["quarter_revenue"], axis=1)
if ytd == False:
df = df.drop(["ytd_revenue"], axis=1)
        df = df[df.index >= begin_period + pd.DateOffset(months=12)]
"""
Python tools and algorithms gathered throughout the development projects and tutorials.
Sections:
1. File Read/Write/Convert/Save Operations
2. Pandas Utils
3. Path Operations for File/Folder/System
4. Algorithms for Hierarchical Structures
5. Utility functions for xlrd library and read_spir function
"""
import collections
import os
import re
import warnings # xlsx writer warning is eliminated
from tkinter import Tk, filedialog, messagebox
import pandas as pd
import xlrd as xl
import xlsxwriter
from six import string_types
from advanced_tools.IO_path_utils import checkfile, get_filepaths
##############################################################################################
### Pandas Utils & Excel Utils
##############################################################################################
def combine_multiple_csv_into_excel(full_path_to_folder=None, sep='\t', encoding='latin1'):
r"""
Combine csv files that can be converted to Dataframe and have same exact structure.
:param full_path_to_folder:
:param sep: Text separator, default is '\t'
:param encoding: Text encoding, default is 'latin1'
:return: excel file with one extra column showing the name of the file.
"""
csv_files = sorted(get_filepaths(full_path_to_folder))
folder_name = os.path.split(full_path_to_folder)[1] # For folder location and folder name
    df_base = pd.read_csv(csv_files[0], sep=sep, encoding=encoding, low_memory=False)
import pytest
import numpy as np
import pandas as pd
from rapidfuzz import fuzz
from polyfuzz import PolyFuzz
from polyfuzz.models import EditDistance, TFIDF, RapidFuzz, BaseMatcher
from tests.utils import get_test_strings
from_list, to_list = get_test_strings()
class MyModel(BaseMatcher):
def match(self, from_list, to_list):
# Calculate distances
matches = [[fuzz.ratio(from_string, to_string) / 100 for to_string in to_list] for from_string in from_list]
# Get best matches
mappings = [to_list[index] for index in np.argmax(matches, axis=1)]
scores = np.max(matches, axis=1)
# Prepare dataframe
        matches = pd.DataFrame({'From': from_list, 'To': mappings, 'Similarity': scores})
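# Illustrative continuation (a sketch; the original file is truncated here): the
# custom matcher would return the `matches` frame and can then be plugged into
# PolyFuzz like the built-in models, e.g.
#         return matches
# custom_model = PolyFuzz(MyModel())
# custom_model.match(from_list, to_list)
# custom_model.get_matches()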
#Author: <NAME>, <NAME>
#Make sure you are connected to DataRobot Client.
#The functions below will help you evaluate a DataRobot TS project.
import datarobot as dr
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from ts_metrics import *
######################
# Project Evaluation
######################
def get_top_models_from_project(
project, n_models=1, data_subset='allBacktests', include_blenders=True, metric=None
):
"""
project: project object
DataRobot project
n_models: int
Number of top models to return
data_subset: str (optional)
        Can be set to backtest_1, allBacktests, or holdout
include_blenders: boolean (optional)
Controls whether to include ensemble models
metric: str (optional)
Choose from list of 'MASE', 'RMSE', 'MAPE', 'SMAPE', 'MAE', 'R Squared', 'Gamma Deviance',
        'Tweedie Deviance', 'Poisson Deviance', or 'RMSLE'
Returns:
--------
List of model objects from a DataRobot project
"""
assert data_subset in [
'backtest_1',
'allBacktests',
'holdout',
], 'data_subset must be either backtest_1, allBacktests, or holdout'
if n_models is not None:
assert isinstance(n_models, int), 'n_models must be an int'
if n_models is not None:
assert n_models >= 1, 'n_models must be greater than or equal to 1'
assert isinstance(include_blenders, bool), 'include_blenders must be a boolean'
mapper = {
'backtest_1': 'backtestingScores',
'allBacktests': 'backtesting',
'holdout': 'holdout',
}
if metric is None:
metric = project.metric
if data_subset == 'holdout':
project.unlock_holdout()
models = [
m
for m in project.get_datetime_models()
if m.backtests[0]['status'] != 'BACKTEST_BOUNDARIES_EXCEEDED'
] # if m.holdout_status != 'HOLDOUT_BOUNDARIES_EXCEEDED']
if data_subset == 'backtest_1':
# models = sorted(models, key=lambda m: np.mean([i for i in m.metrics[metric][mapper[data_subset]][0] if i]), reverse=False)
models = sorted(
models, key=lambda m: m.metrics[metric][mapper[data_subset]][0], reverse=False
)
elif data_subset == 'allBacktests':
models = sorted(
models,
key=lambda m: m.metrics[metric][mapper[data_subset]]
if m.metrics[metric][mapper[data_subset]] is not None
else np.nan,
reverse=False,
)
else:
models = sorted(models, key=lambda m: m.metrics[metric][mapper[data_subset]], reverse=False)
if not include_blenders:
models = [m for m in models if m.model_category != 'blend']
if n_models is None:
n_models = len(models)
models = models[0:n_models]
assert len(models) > 0, 'You have not run any models for this project'
return models
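# Illustrative usage sketch (hedged): 'PROJECT_ID' is a placeholder and an already
# authenticated datarobot client is assumed.
def _example_top_models_for_one_project():
    project = dr.Project.get('PROJECT_ID')  # placeholder project id
    return get_top_models_from_project(project, n_models=3, data_subset='holdout')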
def get_top_models_from_projects(
projects, n_models=1, data_subset='allBacktests', include_blenders=True, metric=None
):
"""
Pull top models from leaderboard across multiple DataRobot projects
projects: list
DataRobot project object(s)
n_models: int
Number of top models to return
data_subset: str (optional)
Can be set to 'backtest_1', 'allBacktests', or 'holdout'
include_blenders: boolean (optional)
Controls whether to include ensemble models
metric: str (optional)
Project metric used to sort the DataRobot leaderboard
Choose from 'MASE', 'RMSE', 'MAPE', 'SMAPE', 'MAE', 'R Squared', 'Gamma Deviance',
'Tweedie Deviance', 'Poisson Deviance', or 'RMSLE'
Returns:
--------
List of model objects from DataRobot project(s)
"""
assert isinstance(projects, list), 'Projects must be a list object'
models_all = []
for p in projects:
models = get_top_models_from_project(p, n_models, data_subset, include_blenders, metric)
models_all.extend(models)
return models_all
def compute_backtests(
projects, n_models=5, data_subset='backtest_1', include_blenders=True, metric=None
):
"""
Compute all backtests for top models across multiple DataRobot projects
projects: list
DataRobot project object(s)
n_models: int
Number of top models to return
data_subset: str (optional)
Can be set to 'backtest_1', 'allBacktests', or 'holdout'
include_blenders: boolean (optional)
Controls whether to include ensemble models
metric: str (optional)
Project metric used to sort the DataRobot leaderboard
Choose from 'MASE', 'RMSE', 'MAPE', 'SMAPE', 'MAE', 'R Squared', 'Gamma Deviance',
'Tweedie Deviance', 'Poisson Deviance', or 'RMSLE'
"""
assert isinstance(projects, list), 'Projects must be a list object'
for p in projects:
models = get_top_models_from_project(
p,
n_models=n_models,
data_subset=data_subset,
include_blenders=include_blenders,
metric=metric,
)
for m in models:
try:
m.score_backtests() # request backtests for top models
print(f'Computing backtests for model {m.id} in Project {p.project_name}')
except dr.errors.ClientError:
pass
print(
f'All available backtests have been submitted for scoring for project {p.project_name}'
)
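# Illustrative usage sketch (hedged): request first-backtest scoring for the top five
# models of several projects; the project ids are placeholders.
def _example_compute_backtests_for_projects():
    projects = [dr.Project.get(pid) for pid in ('PROJECT_ID_1', 'PROJECT_ID_2')]
    compute_backtests(projects, n_models=5, data_subset='backtest_1')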
def get_or_request_backtest_scores(
projects, n_models=5, data_subset='allBacktests', include_blenders=True, metric=None
):
"""
Get or request backtest and holdout scores from top models across multiple DataRobot projects
projects: list
DataRobot project object(s)
n_models: int
Number of top models to return
data_subset: str (optional)
Can be set to 'backtest_1', 'allBacktests', or 'holdout'
include_blenders: boolean (optional)
Controls whether to include ensemble models
metric: str (optional)
Project metric used to sort the DataRobot leaderboard
Choose from 'MASE', 'RMSE', 'MAPE', 'SMAPE', 'MAE', 'R Squared', 'Gamma Deviance',
'Tweedie Deviance', 'Poisson Deviance', or 'RMSLE'
Returns:
--------
pandas df
"""
assert isinstance(projects, list), 'Projects must be a list object'
scores = pd.DataFrame()
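# Illustrative call shape (hedged; placeholder project list):
# scores_df = get_or_request_backtest_scores([dr.Project.get('PROJECT_ID')], n_models=5)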
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import decimal
import json
import multiprocessing as mp
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
import pytest
import pyarrow as pa
import pyarrow.types as patypes
from pyarrow.compat import PY2
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
# all occurrences of _check_pandas_roundtrip pass expected_schema
# without the pandas generated key-value metadata, so we need to
# add it before checking schema equality
expected_schema = expected_schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
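# Hedged illustrative smoke check of the helpers above: a plain int64 column and array
# survive the pandas -> Arrow -> pandas roundtrip unchanged.
def test_roundtrip_helpers_smoke():
    _check_pandas_roundtrip(pd.DataFrame({'ints': np.arange(3, dtype='int64')}))
    _check_array_from_pandas_roundtrip(np.arange(3, dtype='int64'))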
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowInvalid):
arr.to_pandas()
with pytest.raises(pa.ArrowInvalid):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
key = u'あ'.encode('utf8')
data = {key: column_data}
df = pd.DataFrame(data)
# we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())]
names = ['f2', 'f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_nulls_to_ints(self):
# ARROW-2135
df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]})
schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
assert table[0].to_pylist() == [1, 2, None]
tm.assert_frame_equal(df, table.to_pandas())
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_integer_types(self):
# Test all Numpy integer aliases
data = OrderedDict()
numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
'int_', 'uint', 'longlong', 'ulonglong']
for dtype in numpy_dtypes:
data[dtype] = np.arange(12, dtype=dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df)
# Do the same with pa.array()
# (for some reason, it doesn't use the same code paths at all)
for np_arr in data.values():
arr = pa.array(np_arr)
assert arr.to_pylist() == np_arr.tolist()
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
def test_half_floats_from_numpy(self):
arr = np.array([1.5, np.nan], dtype=np.float16)
a = pa.array(arr, type=pa.float16())
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert isinstance(y, np.float16)
assert np.isnan(y)
a = pa.array(arr, type=pa.float16(), from_pandas=True)
x, y = a.to_pylist()
assert isinstance(x, np.float16)
assert x == 1.5
assert y is None
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
result = array.to_pandas(integer_object_nulls=True)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 100, size=num_values, dtype=dtype)
array = pa.array(values, mask=null_mask)
if null_mask.any():
expected = values.astype('O')
expected[null_mask] = None
else:
expected = values
expected = pd.DataFrame({dtype: expected})
table = pa.Table.from_arrays([array], [dtype])
result = table.to_pandas(integer_object_nulls=True)
tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
# drop in a null and use ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_python_datetime_subclass(self):
class MyDatetime(datetime):
# see https://github.com/pandas-dev/pandas/issues/21142
nanosecond = 0.0
date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({"datetime": date_array})
# https://github.com/pandas-dev/pandas/issues/21142
expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
tm.assert_frame_equal(expected_df, result)
def test_python_date_subclass(self):
class MyDate(date):
pass
date_array = [MyDate(2000, 1, 1)]
df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.Date32Array)
result = table.to_pandas()
expected_df = pd.DataFrame(
{"date": np.array(["2000-01-01"], dtype="datetime64[ns]")}
)
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False]),
])
def test_pandas_datetime_to_date64(self, mask):
s = pd.to_datetime([
'2018-05-10T00:00:00',
'2018-05-11T00:00:00',
'2018-05-12T00:00:00',
])
arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
data = np.array([
date(2018, 5, 10),
date(2018, 5, 11),
date(2018, 5, 12)
])
expected = pa.array(data, mask=mask, type=pa.date64())
assert arr.equals(expected)
@pytest.mark.parametrize('mask', [
None,
np.array([True, False, False])
])
def test_pandas_datetime_to_date64_failures(self, mask):
s = pd.to_datetime([
'2018-05-10T10:24:01',
'2018-05-11T10:24:01',
'2018-05-12T10:24:01',
])
expected_msg = 'Timestamp value had non-zero intraday milliseconds'
with pytest.raises(pa.ArrowInvalid, match=expected_msg):
pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
def test_array_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
assert arr.equals(pa.array(expected))
result = arr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(arr.to_pandas(), expected)
result = arr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_chunked_array_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
carr = pa.chunked_array([data])
result = carr.to_pandas()
assert result.dtype == expected.dtype
npt.assert_array_equal(carr.to_pandas(), expected)
result = carr.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_column_convert_date_as_object(self):
data = [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]
expected = np.array(['2000-01-01',
None,
'1970-01-01',
'2040-02-26'], dtype='datetime64')
arr = pa.array(data)
column = pa.column('date', arr)
result = column.to_pandas()
npt.assert_array_equal(column.to_pandas(), expected)
result = column.to_pandas(date_as_object=True)
expected = expected.astype(object)
assert result.dtype == expected.dtype
npt.assert_array_equal(result, expected)
def test_table_convert_date_as_object(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
df_datetime = table.to_pandas()
df_object = table.to_pandas(date_as_object=True)
tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
check_dtype=True)
tm.assert_frame_equal(df, df_object, check_dtype=True)
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
# TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
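# _pytime_to_micros (defined elsewhere in this test module) is assumed to convert a
# datetime.time to integer microseconds since midnight, which is what the time64('us')
# arrays below expect.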
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
@pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
def test_numpy_datetime64_day_unit(self, dtype):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d, type=dtype)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
def test_fixed_offset_timezone(self):
df = pd.DataFrame({
'a': [
pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT
]
})
_check_pandas_roundtrip(df)
_check_serialize_components_roundtrip(df)
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
def test_pandas_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
v1 = b'x' * 100000000
v2 = b'x' * 147483646
# ARROW-2227, hit exactly 2GB on the nose
df = pd.DataFrame({
'strings': [v1] * 20 + [v2] + ['x'] * 20
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_variable_size_bytes(self):
s = pd.Series([b'123', b'', b'a', None])
_check_series_roundtrip(s, type_=pa.binary())
def test_binary_from_bytearray(self):
s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
None])
# Explicitly set type
_check_series_roundtrip(s, type_=pa.binary())
# Infer type from bytearrays
_check_series_roundtrip(s, expected_pa_type=pa.binary())
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_selective_categoricals(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
expected_str = pd.DataFrame({'strings': values})
expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})
result1 = table.to_pandas(categories=['strings'])
tm.assert_frame_equal(result1, expected_cat, check_dtype=True)
result2 = table.to_pandas(categories=[])
tm.assert_frame_equal(result2, expected_str, check_dtype=True)
result3 = table.to_pandas(categories=('strings',))
tm.assert_frame_equal(result3, expected_cat, check_dtype=True)
result4 = table.to_pandas(categories=tuple())
tm.assert_frame_equal(result4, expected_str, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
# Regression test for ARROW-2101
def test_array_of_bytes_to_strings(self):
converted = pa.array(np.array([b'x'], dtype=object), pa.string())
assert converted.type == pa.string()
# Make sure that if an ndarray of bytes is passed to the array
# constructor and the type is string, it will fail if those bytes
# cannot be converted to utf-8
def test_array_of_bytes_to_strings_bad_data(self):
with pytest.raises(
pa.lib.ArrowInvalid,
match="was not a utf8 string"):
pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())
def test_numpy_string_array_to_fixed_size_binary(self):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
converted = pa.array(arr, type=pa.binary(3))
expected = pa.array(list(arr), type=pa.binary(3))
assert converted.equals(expected)
mask = np.array([True, False, True])
converted = pa.array(arr, type=pa.binary(3), mask=mask)
expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
assert converted.equals(expected)
with pytest.raises(pa.lib.ArrowInvalid,
match=r'Got bytestring of length 3 \(expected 4\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
pa.array(arr, type=pa.binary(4))
with pytest.raises(
pa.lib.ArrowInvalid,
match=r'Got bytestring of length 12 \(expected 3\)'):
arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
"""
Conversion test for decimal types.
"""
decimal32 = [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439')
]
decimal64 = [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731')
]
decimal128 = [
decimal.Decimal('394092382910493.12341234678'),
decimal.Decimal('-314292388910493.12343437128')
]
@pytest.mark.parametrize(('values', 'expected_type'), [
pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
])
def test_decimal_from_pandas(self, values, expected_type):
expected = pd.DataFrame({'decimals': values})
table = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', expected_type)
# schema's metadata is generated by from_pandas conversion
expected_schema = pa.schema([field], metadata=table.schema.metadata)
assert table.schema.equals(expected_schema)
@pytest.mark.parametrize('values', [
pytest.param(decimal32, id='decimal32'),
pytest.param(decimal64, id='decimal64'),
pytest.param(decimal128, id='decimal128')
])
def test_decimal_to_pandas(self, values):
expected = pd.DataFrame({'decimals': values})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_fails_with_truncation(self):
data1 = [decimal.Decimal('1.234')]
type1 = pa.decimal128(10, 2)
with pytest.raises(pa.ArrowInvalid):
pa.array(data1, type=type1)
data2 = [decimal.Decimal('1.2345')]
type2 = pa.decimal128(10, 3)
with pytest.raises(pa.ArrowInvalid):
pa.array(data2, type=type2)
def test_decimal_with_different_precisions(self):
data = [
decimal.Decimal('0.01'),
decimal.Decimal('0.001'),
]
series = pd.Series(data)
array = pa.array(series)
assert array.to_pylist() == data
assert array.type == pa.decimal128(3, 3)
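# Inference note (hedged): the unified decimal type adopts the largest scale seen
# (3, from Decimal('0.001')) and a precision wide enough to hold both inputs at that scale.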
array = pa.array(data, type=pa.decimal128(12, 5))
expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
assert array.to_pylist() == expected
def test_decimal_with_None_explicit_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
# Test that having all None values still produces decimal array
series = pd.Series([None] * 2)
_check_series_roundtrip(series, type_=pa.decimal128(12, 5))
def test_decimal_with_None_infer_type(self):
series = pd.Series([decimal.Decimal('3.14'), None])
_check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))
def test_strided_objects(self, tmpdir):
# see ARROW-3053
data = {
'a': {0: 'a'},
'b': {0: decimal.Decimal('0.0')}
}
# This yields strided objects
df = pd.DataFrame.from_dict(data)
_check_pandas_roundtrip(df)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
# schema's metadata is generated by from_pandas conversion
expected_schema = schema.add_metadata(table.schema.metadata)
assert table.schema.equals(expected_schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_nested_list_first_empty(self):
# ARROW-2711
data = pd.Series([[], [u"a"]])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.string())
def test_nested_smaller_ints(self):
# ARROW-1345, ARROW-2008, there were some type inference bugs happening
# before
data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
result = pa.array(data)
result2 = pa.array(data.values)
expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
assert result.equals(expected)
assert result2.equals(expected)
data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
result3 = pa.array(data3)
expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
assert result3.equals(expected3)
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_to_pandas(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from collections import defaultdict
import mudcod.utils.visualization as VIS # noqa: E402
from mudcod.utils import sutils # noqa: E402
MAIN_DIR = Path(__file__).absolute().parent.parent
SIMULATION_DIR = MAIN_DIR / "simulations"
RESULT_DIR = MAIN_DIR / "results"
RESULTS_PATH = RESULT_DIR / "simulation_results"
FIGURE_DIR = RESULT_DIR / "simulation_figures"
sns.set_theme(style="whitegrid")
sutils.safe_create_dir(RESULTS_PATH)
sutils.safe_create_dir(FIGURE_DIR)
objkey = "loglikelihood"
def get_dataframe(result_dict, index_row, columns, multi_index):
resultsDf = pd.DataFrame(result_dict["val"], index=index_row, columns=columns)
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import sched
import threading
import time
import asyncio
import pandas as pd
from QUANTAXIS.QAEngine.QAEvent import QA_Event, QA_Worker
from QUANTAXIS.QAEngine.QATask import QA_Task
from QUANTAXIS.QAMarket.QAOrder import QA_OrderQueue
from QUANTAXIS.QASU.save_orderhandler import QA_SU_save_deal, QA_SU_save_order
from QUANTAXIS.QAUtil.QADate_trade import QA_util_if_tradetime
from QUANTAXIS.QAUtil.QAParameter import (BROKER_EVENT, BROKER_TYPE,
EVENT_TYPE, MARKET_EVENT,
ORDER_EVENT)
class QA_OrderHandlerAsync(QA_Worker):
"""ORDER执行器
异步handler
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.order_queue = QA_OrderQueue()
self.type = EVENT_TYPE.MARKET_EVENT
self.event = QA_Event()
self.order_status = pd.DataFrame()
self.deal_status = pd.DataFrame()
self.if_start_orderquery = False
self.monitor = {}  # Added in 1.1: used to monitor orders
def run(self, event):
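# Event routing overview (descriptive): RECEIVE_ORDER forwards the order to the broker,
# inserts it into the order queue and fires the callback; TRADE executes pending orders;
# SETTLE trades anything left and settles the queue; QUERY_ORDER / QUERY_DEAL poll the
# monitored accounts' order and deal state and persist it.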
if event.event_type is BROKER_EVENT.RECEIVE_ORDER:
# At this point the message should be an order object
"""
The OrderHandler receives an order,
the orderhandler dispatches and forwards it to the broker,
the broker returns the submission result (success/failure),
orderhandler.order_queue inserts the order,
and the callback is executed.
"""
order = event.order
order = event.broker.receive_order(
QA_Event(event_type=BROKER_EVENT.TRADE, order=event.order, market_data=event.market_data))
# print(threading.current_thread().ident)
order = self.order_queue.insert_order(order)
if event.callback:
event.callback(order)
elif event.event_type is BROKER_EVENT.TRADE:
# Live trading and local (backtest) execution run synchronously here
self._trade()
# event.event_queue.task_done()
elif event.event_type is BROKER_EVENT.SETTLE:
"""订单队列的结算:
当队列中的订单都被处理过后:
算可以结算了
"""
print('SETTLE ORDERHANDLER')
# if len(self.order_queue.untrade) > 0:
# self.if_start_orderquery = False
# event.event_type = BROKER_EVENT.TRADE
# event.event_queue.put(
# QA_Task(
# worker=self,
# engine='ORDER',
# event=event
# )
# )
if len(self.order_queue.untrade)==0:
self._trade()
else:
self._trade()
# print(self.order_queue.untrade)
self.order_queue.settle()
self.order_status = pd.DataFrame()
self.deal_status = pd.DataFrame()
try:
event.event_queue.task_done()
except:
pass
elif event.event_type is MARKET_EVENT.QUERY_ORDER:
"""query_order和query_deal 需要联动使用
query_order 得到所有的订单列表
query_deal 判断订单状态--> 运行callback函数
实盘涉及到外部订单问题:
及 订单的来源 不完全从QUANTAXIS中发出, 则QA无法记录来源 (标记为外部订单)
"""
if self.if_start_orderquery:
# if QA_util_if_tradetime(datetime.datetime.now()):
# print(event.broker)
# print(event.account_cookie)
try:
# Some fault-tolerance handling
res = [self.monitor[account].query_orders(
account.account_cookie, '') for account in list(self.monitor.keys())]
res = pd.concat(res, axis=0) if len(
res) > 0 else None
except:
time.sleep(1)
self.order_status = res if res is not None else self.order_status
if len(self.order_status) > 0:
QA_SU_save_order(self.order_status)
# else:
# time.sleep(1)
            # add a random sleep here so that fixed-interval refresh requests are not detected
# event=event
event.event_type = MARKET_EVENT.QUERY_DEAL
if event.event_queue.qsize() < 1:
time.sleep(random.randint(1, 2))
# event.event_queue.task_done()
            # non-blocking
if self.if_start_orderquery:
event.event_queue.put(
QA_Task(
worker=self,
engine='ORDER',
event=event
)
)
# time.sleep(random.randint(2,5))
# print(event.event_type)
# print(event2.event_type)
# self.run(event)
# print(self.order_status)
# print('UPDATE ORDERS')
elif event.event_type is MARKET_EVENT.QUERY_DEAL:
"""order_handler- query_deal
将order_handler订单队列中的订单---和deal中匹配起来
"""
# if len(self.order_queue.pending) > 0:
# for order in self.order_queue.pending:
# #self.query
# waiting_realorder_id = [
# order.realorder_id for order in self.order_queue.trade_list]
# result = event.broker.query_deal
# time.sleep(1)
if self.if_start_orderquery:
res = [self.monitor[account].query_orders(
account.account_cookie, 'filled') for account in list(self.monitor.keys())]
try:
#res=[pd.DataFrame() if not isinstance(item,pd.DataFrame) else item for item in res]
                    res = pd.concat(res, axis=0)
""" Extract reports from the pipeline output data.
Input is an output_json prepared by the run tool.
"""
from importlib_resources import files
from pythologist_schemas import get_validator
import logging, argparse, json
import pandas as pd
from collections import OrderedDict
def cli():
args = do_inputs()
main(args)
def _prepend(d,input_key,input_value):
d2 = [(k,v) for k,v in d.items()]
d2 = [(input_key,input_value)] + d2
return OrderedDict(d2)
def _append(d,input_key,input_value):
d2 = [(k,v) for k,v in d.items()]
d2 = d2+[(input_key,input_value)]
return OrderedDict(d2)
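# Illustrative sketch added by the editor (not part of the original script):
# how the two OrderedDict helpers above behave. The sample keys/values are
# arbitrary placeholders.
def _example_prepend_append():
    base = OrderedDict([('b', 2)])
    front = _prepend(base, 'a', 1)   # OrderedDict([('a', 1), ('b', 2)])
    back = _append(base, 'c', 3)     # OrderedDict([('b', 2), ('c', 3)])
    return front, back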
def main(args):
"We need to take the platform and return an appropriate input template"
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("report extraction")
report = json.loads(open(args.report_json,'rt').read())
logger.info("check report json format")
get_validator(files('schema_data').joinpath('report_output.json')).\
validate(report)
logger.info("report json validated")
# Lets start formulating the dataframes
    # cache these in lists here, then concatenate them into dataframes at the end
sheets = OrderedDict({
'smp_cnt_cumulative_lf':[],
'smp_cnt_aggregate_lf':[],
'smp_pct_cumulative_lf':[],
'smp_pct_aggregate_lf':[],
'img_cnt_lf':[],
'img_pct_lf':[]
})
info = {}
for sample in report['sample_outputs']:
sample_name = sample['sample_name']
_df = pd.DataFrame([row for row in sample['sample_reports']['sample_cumulative_count_densities']])
_df['sample_name'] = sample_name
sheets['smp_cnt_cumulative_lf'].append(_df)
info['smp_cnt_cumulative_lf'] = {
'index':False,
'description':'sample-level count density measurements treating all ROIs as a single large image in long table format.'
}
_df = pd.DataFrame([row for row in sample['sample_reports']['sample_aggregate_count_densities']])
_df['sample_name'] = sample_name
sheets['smp_cnt_aggregate_lf'].append(_df)
info['smp_cnt_aggregate_lf'] = {
'index':False,
'description':'sample-level count density measurements averaging the measures from ROIs in long table format.'
}
        _df = pd.DataFrame([row for row in sample['sample_reports']['sample_cumulative_count_percentages']])
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 20:43:35 2019
@author: michaelek
"""
import numpy as np
import pandas as pd
import xarray as xr
from pyproj import Proj, CRS, Transformer
from hydrointerp import interp2d
#import interp2d
from hydrointerp import util
# import util
import random
###################################
### base class
class Interp(object):
"""
Base Interp class to prepare the input data for the interpolation functions.
Parameters
----------
data : DataFrame or Dataset
A pandas DataFrame containing four columns as shown in the below parameters or an Xarray Dataset.
time_name : str
If grid is a DataFrame, then time_name is the time column name. If grid is a Dataset, then time_name is the time coordinate name.
x_name : str
If grid is a DataFrame, then x_name is the x column name. If grid is a Dataset, then x_name is the x coordinate name.
y_name : str
If grid is a DataFrame, then y_name is the y column name. If grid is a Dataset, then y_name is the y coordinate name.
data_name : str
If grid is a DataFrame, then data_name is the data column name. If grid is a Dataset, then data_name is the data variable name.
from_crs : int or str or None
The projection info for the input data if the result should be reprojected to the to_crs projection (either a proj4 str or epsg int).
Returns
-------
Interp object
"""
def __init__(self, grid_data=None, grid_time_name=None, grid_x_name=None, grid_y_name=None, grid_data_name=None, grid_crs=None, point_data=None, point_time_name=None, point_x_name=None, point_y_name=None, point_data_name=None, point_crs=None):
## Assign variables
self._grid_crs = grid_crs
self._point_crs = point_crs
## Check data type
if point_data is not None:
if isinstance(point_data, pd.DataFrame):
self.point_data = point_data.rename(columns={point_x_name: 'x', point_y_name: 'y', point_time_name: 'time', point_data_name: 'precip'}).copy()
if self.point_data.dtypes['time'].name != 'datetime64[ns]':
                    self.point_data['time'] = pd.to_datetime(self.point_data['time'])
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
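# Illustrative sketch added by the editor (not part of the original script):
# the lag-scan pattern used below, applied to a synthetic random walk and a
# copy shifted forward by 4 steps. With the sign convention of crosscorr
# (datay is shifted by +lag before correlating), the correlation peaks at
# lag = -4 here.
def _example_crosscorr_usage():
    rs = np.random.RandomState(0)
    s = pd.Series(rs.normal(size=300).cumsum())
    lagged = s.shift(4)
    scores = {lag: crosscorr(s, lagged, lag=lag) for lag in range(-10, 11)}
    return max(scores, key=scores.get)  # -4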
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                            school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
binomial_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = binomial_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
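# Editor's note (added for clarity, not part of the original script): per age
# class i, compute_R0 above evaluates
#     R0_i = (a_i * d_a + omega) * beta * sum_j Nc[i, j]
# for every posterior sample, and R0_overall is the mean over samples of the
# population-weighted sum  sum_i R0_i * initN_i / sum(initN).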
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
            contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
# Import the class
import pandas
import numpy
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
from tmap.tda import mapper
from tmap.tda.Filter import _filter_dict
from tmap.tda.cover import Cover
from tmap.tda.metric import Metric
from tmap.tda.utils import optimize_dbscan_eps
import matplotlib.pyplot as plt
#========== Define Data and Labels here==========
b_data=pandas.read_csv("./../Results/bronchieactasis_data.csv",index_col=0)
c_data=pandas.read_csv("./../Results/COPD.csv",index_col=0)
#=======Data creation merging=======
data=pandas.concat([b_data,c_data])
allergens=data.columns.values
labels_b=pandas.read_csv("./../../Bronchiectasis_Clinical_Metadata_V3.4.csv",index_col=0)
labels_c = pandas.read_csv("./../../COPD_Clinical_Metadata_V3.3_tpyxlsx.csv",index_col=0)
import ast
import inspect
import logging
import os
import pathlib
import sys
import typing
import numpy as np
import pandas as pd
from pymatgen.core.periodic_table import _pt_data, Element
from AnalysisModule.calculator.fp import GetFpArray
from AnalysisModule.routines.util import MDefined, NMDefined
from AnalysisModule.routines.util import load_pkl, save_pkl
from MLModule.utils import load_input_tables
from MLModule.utils import variance_threshold_selector, split_columns
this_dir = os.path.dirname(os.path.abspath(__file__))
def Encode_bus(bus_column: [str], BuidTable=None):
BUid_in_dataset = sorted(BuidTable.keys())
num_bus = len(BUid_in_dataset)
buid_encoding_dict = {v: k for k, v in enumerate(BUid_in_dataset)}
ohe_array = np.zeros((len(bus_column), num_bus), dtype=np.float32)
for i_entry, bu_entry in enumerate(bus_column):
for buid in ast.literal_eval(bu_entry):
ohe_array[i_entry][buid_encoding_dict[buid]] = 1
logging.info("{} gives # of columns: {}".format(inspect.stack()[0][3], ohe_array.shape[1]))
columns = ["Buid_bit_{}".format(num) for num in range(ohe_array.shape[1])]
ohe_df = pd.DataFrame(ohe_array, columns=columns)
return ohe_array, ohe_df
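# Illustrative sketch added by the editor (not part of the original module):
# the input format Encode_bus expects -- string-encoded lists of BU ids and a
# BuidTable whose keys enumerate every BU id in the dataset. The table content
# here is a made-up placeholder.
def _example_encode_bus_usage():
    table = {0: 'bu-a', 1: 'bu-b', 2: 'bu-c'}
    column = ['[0, 2]', '[1]']
    arr, df = Encode_bus(column, BuidTable=table)
    # arr -> [[1., 0., 1.], [0., 1., 0.]]
    return arr, df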
def Encode_elements(
compositions: [str],
possible_elements: [str] = None,
exclude_elements=("H", "O"),
exclude_groups=("noble_gas",),
feature_header="Elements_bit"
):
"""
one hot encoder for elementary strings
"""
if possible_elements is None:
possible_elements = sorted(_pt_data.keys())
elements = []
for e in possible_elements:
if e in exclude_elements:
continue
element = Element(e)
if any(getattr(element, "is_{}".format(p)) for p in exclude_groups):
continue
elements.append(e)
elements = sorted(elements)
n_compositions = len(compositions)
ohe_array = np.zeros((n_compositions, len(elements)), dtype=np.float32)
presented = []
for icomp, composition in enumerate(compositions):
symbol_list = ast.literal_eval(composition)
for string in symbol_list:
presented.append(string)
if string == "O":
continue
ind = elements.index(string)
ohe_array[icomp][ind] = 1
logging.info("{} gives # of columns: {}".format(inspect.stack()[0][3], ohe_array.shape[1]))
# columns = ["{}_{}".format(feature_header, num) for num in range(ohe_array.shape[1])]
columns = [elements[num] for num in range(ohe_array.shape[1])]
    ohe_df = pd.DataFrame(ohe_array, columns=columns)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import matplotlib.cm as cm
import os
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
"Code to find the percentages above which a data point is considered cloudy,"
"taking into account the changes in the pyranometer data, so as to find an average"
"threshold above which a percentage is considered cloudy in the Fisheye data. The data"
"are processed at one-minute resolution."
################################################################################################################
## -------------------------READING THE FISH EYE CLOUD COVER DATA-------------------------------##
################################################################################################################
df_cloud_TS = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Fish_Eye/Totales/Total_Timeseries_FishEye_TS.csv', sep=',')
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras import backend as K  # needed by fgsm() below
from MalGAN_preprocess import preprocess
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
adv = inp.copy()
loss = K.mean(model.output[:, 0])
grads = K.gradients(loss, model.layers[1].output)[0]
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
mask = np.zeros(model.layers[1].output.shape[1:]) # embedding layer output shape
mask[pad_idx:pad_idx+pad_len] = 1
grads *= K.constant(mask)
iterate = K.function([model.layers[1].output], [loss, grads])
g = 0.
step = int(1/step_size)*10
for _ in range(step):
loss_value, grads_value = iterate([adv])
grads_value *= step_size
g += grads_value
adv += grads_value
#print (e, loss_value, end='\r')
if loss_value >= 0.9:
break
return adv, g, loss_value
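# Illustrative sketch added by the editor (not part of the original file):
# typical call pattern for fgsm(). It assumes `model` is the Keras model
# expected above (embedding output at model.layers[1]) and `x` is a single
# preprocessed byte sequence; pad_idx/pad_len mark the appended bytes the
# attack is allowed to perturb. All names here are placeholders.
def _example_fgsm_usage(model, x, file_len, pad_len):
    adv, grad, score = fgsm(model, x, pad_idx=file_len, pad_len=pad_len, e=0)
    return adv, score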
def limit_gpu_memory(per):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = per
set_session(tf.Session(config=config))
def train_test_split(data, label, val_size=0.1):
idx = np.arange(len(data))
np.random.shuffle(idx)
split = int(len(data)*val_size)
x_train, x_test = data[idx[split:]], data[idx[:split]]
y_train, y_test = label[idx[split:]], label[idx[:split]]
return x_train, x_test, y_train, y_test
def data_generator(data, labels, max_len=200000, batch_size=64, shuffle=True):
idx = np.arange(len(data))
if shuffle:
np.random.shuffle(idx)
batches = [idx[range(batch_size*i, min(len(data), batch_size*(i+1)))] for i in range(len(data)//batch_size+1)]
while True:
for i in batches:
xx = preprocess(data[i], max_len)[0]
yy = labels[i]
yield (xx, yy)
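# Illustrative sketch added by the editor (not part of the original file):
# pulling a single batch from the generator above, assuming `data` is an array
# of raw samples accepted by preprocess() and `labels` a matching label array.
def _example_data_generator_usage(data, labels):
    gen = data_generator(data, labels, max_len=200000, batch_size=8)
    xx, yy = next(gen)
    return xx, yy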
class logger():
def __init__(self):
self.fn = []
self.len = []
self.pad_len = []
self.loss = []
self.pred = []
self.org = []
def write(self, fn, org_score, file_len, pad_len, loss, pred):
self.fn.append(fn.split('/')[-1])
self.org.append(org_score)
self.len.append(file_len)
self.pad_len.append(pad_len)
self.loss.append(loss)
self.pred.append(pred)
print('\nFILE:', fn)
if pad_len > 0:
print('\tfile length:', file_len)
print('\tpad length:', pad_len)
#if not np.isnan(loss):
print('\tloss:', loss)
print('\tscore:', pred)
else:
print('\tfile length:', file_len, ', Exceed max length ! Ignored !')
print('\toriginal score:', org_score)
def save(self, path):
d = {'filename':self.fn,
'original score':self.org,
'file length':self.len,
'pad length':self.pad_len,
'loss':self.loss,
'predict score':self.pred}
        df = pd.DataFrame(data=d)
'''
Created on 19 May 2020
@author: spasz
@brief: Trend indicator. Rising/falling, based on the data passed as an argument.
'''
from scipy import signal
import numpy
import datetime
import pandas as pd
import matplotlib.pyplot as plt
from core.indicator import indicator
class trend(indicator):
def __init__(self, data, ttype='rising'):
indicator.__init__(self, 'Trend', 'trend', data.index)
self.type = ttype
self.trends = self.Init(data)
def Init(self, data):
'''Init trend based on given data'''
if (self.type == 'rising'):
return self.FindUptrends(data)
return self.FindDowntrends(data)
@staticmethod
def FindMaxPeaks(data, n=7):
'''Return series of max points from given data'''
maxs = data.iloc[signal.argrelextrema(
data.values, numpy.greater_equal, order=n)[0]]
return maxs
@staticmethod
def FindMinPeaks(data, n=7):
'''Return series of min points from given data'''
mins = data.iloc[signal.argrelextrema(
data.values, numpy.less_equal, order=n)[0]]
return mins
@staticmethod
def GetTrendDaysLength(trend):
''' Returns trend days length '''
delta = trend.index[-1]-trend.index[0]
return delta.days
def FindUptrends(self, data, days=6, n=2):
        ''' Uptrend detection is based on local minima '''
uptrends = []
trend = pd.Series()
mins = self.FindMinPeaks(data, n)
        # Find consecutive rising minima
for i in range(len(mins.values) - 1):
# If rising
if (mins[i] <= mins[i + 1]):
trend = trend.append(
pd.Series(mins.values[i], index=[mins.index[i]]))
trend = trend.append(
pd.Series(mins.values[i + 1], index=[mins.index[i + 1]]))
elif (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
uptrends.append(trend)
trend = pd.Series()
# Add last trend
if (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
uptrends.append(trend)
# Calculate regression line most fitting.
# If some point is far away from line then drop it.
# Add to data.
return uptrends
def FindDowntrends(self, data, days=6, n=2):
        ''' Downtrend detection is based on local maxima '''
downtrends = []
trend = pd.Series()
maxs = self.FindMaxPeaks(data, n)
        # Find consecutive falling maxima
for i in range(len(maxs.values) - 1):
# If falling
if (maxs[i] >= maxs[i + 1]):
trend = trend.append(
pd.Series(maxs.values[i], index=[maxs.index[i]]))
trend = trend.append(
pd.Series(maxs.values[i + 1], index=[maxs.index[i + 1]]))
elif (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
downtrends.append(trend)
trend = pd.Series()
# Add last trend
if (trend.size > 0):
trend = trend.loc[~trend.index.duplicated()]
if (self.GetTrendDaysLength(trend) >= days):
downtrends.append(trend)
return downtrends
@staticmethod
def ExtendedTrendForward(trend, days=7):
# Delta of values
dy = trend[-1] - trend[-2]
# Time delta in days
dt = trend.index[-1] - trend.index[-2]
dt = dt.days
# Append last element
t = trend.index[-1] + datetime.timedelta(days=days)
y = trend[-1] + days * (dy / dt)
        return trend.append(pd.Series(y, index=[t]))
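# Illustrative sketch added by the editor (not part of the original module):
# building the indicator on a synthetic, mostly-rising daily price series.
# It assumes the core.indicator dependency imported above is available.
def _example_trend_usage():
    idx = pd.date_range('2020-01-01', periods=90, freq='D')
    prices = pd.Series(numpy.linspace(100, 130, 90) + numpy.random.normal(0, 0.5, 90), index=idx)
    uptrends = trend(prices, ttype='rising').trends
    return uptrends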
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 3 18:54:36 2021
@author: <NAME>
"""
from datetime import datetime
from pytz import timezone
import spiceypy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import geocoder
import static as s
# Load the SPICE kernels via a meta file
spiceypy.furnsh('./skyplot/kernel_meta.txt')
# Set the geographic longitude / latitude for Bengaluru, India
loc = 'Bengaluru, India' #Bengaluru, India, Sydney, Australia, Stuttgart, Germany
tzone = 'Europe/Berlin' # Asia/Kolkata, Australia/Sydney, Europe/Berlin
format = "%Y-%m-%d %H:%M:%S"
# Current time in UTC
local_tz = datetime.now(timezone(tzone)) #local time zone
tz = local_tz.strftime(format)
#print(local_tz)
# Convert to local timezone i.e. in this case Asia/Kolkata time zone
datetime_obj = local_tz.astimezone(timezone('UTC')) #Asia/Kolkata, UTC, Asia/Kuwait, Europe/Berlin, Australia/Sydney, Asia/Singapore
#print(datetime_obj.strftime(format))
# Create an initial date-time object that is converted to a string
# datetime_now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
datetime_now = datetime_obj.strftime(format)
#print (datetime_now)
# Convert to Ephemeris Time (ET) using the SPICE function utc2et
datetime_et = spiceypy.utc2et(datetime_now)
#print(datetime_et)
g = geocoder.opencage( loc , key = s.open_cage_api_key)
coord = g.latlng
city = g.city
geo_lat_dec = (round(g.latlng[0],4))
geo_long_dec = (round(g.latlng[1],4))
geo_lat_rad = np.radians(geo_lat_dec)
print (city, geo_lat_dec, geo_long_dec)
print (tz)
# We want to compute the coordinates for different Solar System bodies as seen
# from our planet. First, a pandas dataframe is set that is used to append the
# computed data
solsys_df = pd.DataFrame()
# Add the ET and the corresponding UTC date-time string
solsys_df.loc[:, 'ET'] = [datetime_et]
solsys_df.loc[:, ' Hrs'] = [datetime_now]
# Set a dictionary that lists some body names and the corresponding NAIF ID
# code. Mars has the ID 499, however the loaded kernels do not contain the
# positional information. We use the Mars barycentre instead
# Uranus, Neptune, Pluto cannot be seen without a telescope
SOLSYS_DICT = {'SUN': 10, 'VENUS': 299, 'MOON': 301, 'MARS': 4, 'JUPITER': 5, 'SATURN': 6, 'URANUS':7, 'NEPTUNE':8, 'PLUTO': 9}
# Each body shall have an individual color; set a list with some colors
BODY_COLOR_ARRAY = ['y', 'tab:orange', 'tab:gray', 'tab:red', 'tab:blue', 'tab:purple', 'tab:pink' ,'tab:green', 'tab:brown']
# Now we want the coordinates in equatorial J2000. For this purpose we
# iterate through all celestial bodies
for body_name in SOLSYS_DICT:
# First, compute the directional vector of the body as seen from Earth in
# J2000
solsys_df.loc[:, f'dir_{body_name}_wrt_earth_equ'] = solsys_df['ET'] \
.apply(lambda x: spiceypy.spkezp(targ=SOLSYS_DICT[body_name], \
et=x, \
ref='J2000', \
abcorr='LT+S', \
obs=399)[0])
# Compute the longitude and latitude values in equatorial J2000
# coordinates
solsys_df.loc[:, f'{body_name}_long_rad_equ'] = solsys_df[f'dir_{body_name}_wrt_earth_equ'] \
.apply(lambda x: spiceypy.recrad(x)[1])
solsys_df.loc[:, f'{body_name}_lat_rad_equ'] = solsys_df[f'dir_{body_name}_wrt_earth_equ'] \
.apply(lambda x: spiceypy.recrad(x)[2])
# Apply the same logic as shown before to compute the longitudes for the
# matplotlib figure
solsys_df.loc[:, f'{body_name}_long_rad4plot_equ'] = \
solsys_df[f'{body_name}_long_rad_equ'] \
.apply(lambda x: -1*((x % np.pi) - np.pi) if x > np.pi \
else -1*x)
# Before we plot the data, let's add the Ecliptic plane for the visualisation.
# In ECLIPJ2000 the Ecliptic plane is the equator line (see corresponding
# figure. The latitude is 0 degrees.
# First, we create a separate dataframe for the ecliptic plane
eclip_plane_df = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# # Read model.onnx and modelmeta.json to dataframes
#
# ONNX operators https://github.com/onnx/onnx/blob/master/docs/Operators.md
#
# Metadata file contains the details about model input and output dimensions.
#
# value_info stores input and output type and shape information of the inner-nodes. It is populated after inferring shapes.
#
# Shape inference is not guaranteed to be complete. It works only with constants and simple variables. Reshape to a dynamically-provide shape and arithmetic expressions containing variables block shape inference.
# In[ ]:
import pandas as pd
import json
import onnx
from google.protobuf.json_format import MessageToDict
def extract_nodes(graph):
df = pd.DataFrame({'name':[''],'op_type':[''],'op_id':[0],
'num_in':[0],'num_out':[0],'attr':['']}).drop(0)
for op_id, op in enumerate(graph.node):
name = op.name if op.name else op.op_type + "_" + str(op_id)
d = {'name':name,'op_type':op.op_type,'op_id':op_id,
'num_in':len(op.input),'num_out':len(op.output),'attr':''}
#attr = {'name','type','i','f','s','g','t','ints','floats','strings','graphs','tensors'}
        # Note that if you try to convert a field from op rather than the entire op message,
        # you will get an "object has no attribute 'DESCRIPTOR'" exception:
# da = proto.Message.to_json(op.attribute)
da = MessageToDict(op)
if 'attribute' in da.keys():
for dd in da['attribute']:
# TODO: implement processing 'TENSOR', 'GRAPH', 'INTS', 'STRINGS'
if dd['type'] == 'TENSOR': dd["t"]["rawData"] = ''
if dd['type'] == 'GRAPH': dd["g"] = ''
if dd['type'] == 'INTS' and dd['name'] == "cats_int64s":
dd["ints"] = [len(dd["ints"])]
if dd['type'] == 'STRINGS' and dd['name'] == "cats_strings":
dd["strings"] = [str(len(dd["strings"]))]
# Convert INT and INTS from strings to int
if dd['type'] == 'INT':
dd["i"] = int(dd["i"])
if dd['type'] == 'INTS':
dd["ints"] = [int(x) for x in dd["ints"]]
if op.op_type == 'Constant':
dd = da['attribute'][0]
dnew = [{'name': 'dataType', 'i': dd['t']['dataType'], 'type': 'INT'}]
if 'dims' in dd['t']:
dnew += [{'name': 'dims', 'ints': [int(n) for n in dd['t']['dims']], 'type': 'INTS'}]
da['attribute'] = dnew
d['attr'] = da['attribute']
for i, n in enumerate(op.input):
d['in'+str(i)] = n
for i, n in enumerate(op.output):
d['out'+str(i)] = n
df = df.append(d, ignore_index=True)
return df
def extract_io(inouts):
df = pd.DataFrame({'name':[''],'tt':[0],'ts':[[]]}).drop(0)
for n in inouts:
dims = []
for d in n.type.tensor_type.shape.dim:
dims.append(d.dim_value)
d = {'name':n.name,
'tt': n.type.tensor_type.elem_type,
'ts': dims}
df = df.append(d, ignore_index=True)
return df
def extract_model(fmodel):
# Read the model, check it and infer shapes
model = onnx.load(fmodel)
try:
onnx.checker.check_model(model)
except onnx.checker.ValidationError as e:
print('The model is invalid: %s' % e)
model = onnx.shape_inference.infer_shapes(model)
# Extract info
nodes = extract_nodes(model.graph)
inputs = extract_io(model.graph.input)
outputs = extract_io(model.graph.output)
values = extract_io(model.graph.value_info)
return nodes, inputs, outputs, values
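# Illustrative sketch added by the editor (not part of the original notebook):
# typical call pattern for extract_model(); 'model.onnx' is a placeholder path.
def _example_extract_model_usage():
    nodes, inputs, outputs, values = extract_model('model.onnx')
    return nodes.head(), inputs, outputs, values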
def concat_string_columns(model, prefix='in'):
cols_to_join = []
for col in model.columns:
if col.startswith(prefix):
cols_to_join.append(col)
if len(cols_to_join) == 0: return
cols = model[cols_to_join].aggregate(lambda x: [x.tolist()], axis=0).map(lambda x:x[0])
res = cols[0]
for i in range(len(res)):
v = res[i]
if pd.notna(v):
res[i] = [str(v)]
else:
res[i] = []
for j in range(1,len(cols)):
for i in range(len(res)):
v = cols[j][i]
if pd.notna(v):
res[i].extend([str(v)])
model[prefix] = res
model.drop(columns=cols_to_join, inplace=True)
def concat_columns(model, prefix='tt_in', num='num_in', bflatten=False):
cols_to_join = []
for col in model.columns:
if col.startswith(prefix):
cols_to_join.append(col)
if len(cols_to_join) == 0: return
cols = model[cols_to_join].aggregate(lambda x: [x.tolist()], axis=0).map(lambda x:x[0])
res = cols[0]
for i in range(len(res)):
v = res[i]
if not isinstance(v,(list,tuple)):
if pd.notna(v):
res[i] = [int(v)]
else:
res[i] = []
else:
v = [ int(x) for x in v ]
res[i] = [v]
nums = model[num].tolist()
for j in range(1,len(cols)):
for i in range(len(res)):
v = cols[j][i]
if not isinstance(v,(list,tuple)):
                if pd.notna(v):
import sys
from pathlib import Path
from itertools import chain
from typing import List
import numpy as np
import pandas as pd
import pandas_flavor as pf
from janitor import clean_names
sys.path.append(str(Path.cwd()))
from config import root_dir # noqa E402
from utils import ( # noqa: E402
get_module_purpose,
read_args,
read_ff_csv,
retrieve_team_abbreviation,
)
def clean_game_date(season_year: int, date: str) -> str:
"""Creates a date string from a season year and date string.
Args:
season_year (int): The season year.
date (str): The date string.
Returns:
str: The date string if the date is part of the regular season, otherwise
returns 'non-regular-season'.
"""
if len(date) == 3 and date[0] == "9":
return f"{season_year}-09-{date[1:]}"
elif len(date) == 4 and int(date[:2]) > 9:
return f"{season_year}-{date[:2]}-{date[2:]}"
elif len(date) == 3 and date[0] in ["1", "2"]:
season_year += 1
return f"{season_year}-0{date[0]}-{date[1:]}"
else:
return "non-regular-season"
@pf.register_dataframe_method
def clean_games_date(
df: pd.DataFrame, season_year: int, date_column: str = "date"
) -> pd.DataFrame:
df[date_column] = (
df[date_column].astype(str).apply(lambda x: clean_game_date(season_year, x))
)
return df
# create game id
@pf.register_dataframe_method
def create_game_id(df: pd.DataFrame) -> pd.DataFrame:
"""Create a unique id for each game. Assumes every two rows are a single game.
Args:
df (pd.DataFrame): Dataframe with betting lines data.
Returns:
pd.DataFrame: Dataframe with unique id for
each game that links to the betting lines data.
Raises:
ValueError: Occurs when the dataframe has an odd number of rows.
An odd row count indicates that there is an incomplete game.
"""
if divmod(df.shape[0], 2)[1] != 0:
raise ValueError("Dataframe must have an even number of rows.")
_id = list(range(1, (len(df) // 2) + 1))
df["game_id"] = list(chain(*zip(_id, _id)))
return df
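# Sketch of the id pattern for a hypothetical 4-row frame (two games):
#   _id = [1, 2]; list(chain(*zip(_id, _id))) -> [1, 1, 2, 2]
# so the first two rows share game_id 1 and the next two share game_id 2.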
@pf.register_dataframe_method
def add_team_abbreviation(df: pd.DataFrame, team_column: str = "team") -> pd.DataFrame:
"""Convert team name to team abbreviation.
Args:
df (pd.DataFrame): Dataframe with betting lines data and team column.
team_column (str, optional): Column with full team name. Defaults to "team".
Returns:
pd.DataFrame: Dataframe with team abbreviation column.
"""
df[team_column] = df[team_column].apply(lambda x: retrieve_team_abbreviation(x))
return df
@pf.register_dataframe_method
def create_point_spread_df(df: pd.DataFrame):
"""Convert over-under (O/U) and line to projected points for each team.
For example, if the O/U is 49 and Team 1 is favored by -7 over Team 2,
the point projection for Team 1 is 28 and 21 for Team 2.
Args:
df (pd.DataFrame): Dataframe with betting lines data.
Returns:
pd.DataFrame: Dataframe with point spread data.
"""
is_even_moneyline = df[df["ml"] < 0].shape[0] > 1
is_pick = any(df["open"].str.contains("pk"))
if any([is_even_moneyline, is_pick]):
fav_team, underdog_team = df["team"]
fav_pts, underdog_pts = [
float(max([x for x in df["open"] if x != "pk"])) / 2
] * 2
else:
fav_team_index = [index for index, value in enumerate(df["ml"]) if value < 0][0]
underdog_team_index = int(not fav_team_index)
fav_team = df["team"].iloc[fav_team_index]
underdog_team = df["team"].iloc[underdog_team_index]
pt_spread = df["open"].astype(float).min()
over_under = df["open"].astype(float).max()
fav_pts = (over_under / 2) + pt_spread * 0.5
underdog_pts = (over_under / 2) - pt_spread * 0.5
spread_df = pd.DataFrame(
[[fav_team, underdog_team, fav_pts], [underdog_team, fav_team, underdog_pts]],
columns=["team", "opp", "projected_off_pts"],
)
spread_df["projected_off_pts"] = spread_df["projected_off_pts"].apply(
lambda x: round(x)
)
return spread_df
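# Worked example (numbers assumed, mirroring the docstring): if a game's two
# "open" values are 7 (the spread) and 49 (the over/under), then pt_spread = 7,
# over_under = 49, fav_pts = 49/2 + 3.5 = 28 and underdog_pts = 49/2 - 3.5 = 21.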
@pf.register_dataframe_method
def process_betting(df: pd.DataFrame, season_year: int) -> pd.DataFrame:
"""Converts raw betting lines data to include point projections for each
team in each game. Point projections can be used to inform how much
scoring is expected to occur in each game.
Args:
df (pd.DataFrame): Raw betting lines data.
season_year (int): The season year.
Returns:
pd.DataFrame: Dataframe with point projections for each team in each game.
"""
process_betting_df = pd.DataFrame()
game_ids = df["game_id"].unique()
for game_id in game_ids:
game_df = df[df["game_id"] == game_id]
point_spread_df = game_df.create_point_spread_df()
point_spread_df["date"] = game_df["date"].iloc[0]
point_spread_df["season_year"] = season_year
process_betting_df = pd.concat([process_betting_df, point_spread_df])
return process_betting_df
def impute_missing_projections(
df: pd.DataFrame,
calendar_df: pd.DataFrame,
keys: List[str] = ["date", "season_year", "team", "opp"],
default_point_projection: int = 25,
) -> pd.DataFrame:
"""Impute missing point projections for each team in each game.
Some dates for betting are incorrect. In these instances,
the point projections are imputed with the average point projection
for all previous games played in the same season.
Args:
df (pd.DataFrame): Dataframe with point projections for each team in each game.
calendar_df (pd.DataFrame): Dataframe with dates for each game.
keys (List[str], optional): Columns to join the point projections and calendar.
Defaults to ["date", "season_year", "team", "opp"].
default_point_projection (int, optional): If a team has no
previous games (i.e., it's the first
game of the season), the point projection is imputed with this value.
Defaults to 25.
Returns:
pd.DataFrame: Dataframe with imputed point projections for each team in
each game. There should be no NA values for the point projections.
"""
# identify games without a projection value
missing_projection_df = pd.merge(calendar_df, df, on=keys, how="left")
if not missing_projection_df["projected_off_pts"].isnull().sum():
return pd.DataFrame()
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
import unittest
import pytest
import numpy as np
import pandas as pd
from plaidcloud.utilities import frame_manager
from plaidcloud.utilities.frame_manager import coalesce
__author__ = "<NAME>"
__copyright__ = "© Copyright 2009-2014, Tartan Solutions, Inc"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "Apache 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
nan = np.nan
# Test to see that 2 data frames are equal
# http://stackoverflow.com/questions/14224172/equality-in-pandas-dataframes-column-order-matters
def assertFrameEqual(df1, df2, **kwargs):
""" Assert that two dataframes are equal, ignoring ordering of columns
Args:
df1 (`pandas.DataFrame`): The DataFrame to compare against `df2`
df2 (`pandas.DataFrame`): The DataFrame to compare against `df1`
**kwargs (dict): A dict to pass to `pandas.util.testing.assert_frame_equal`
"""
from pandas.util.testing import assert_frame_equal
return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)
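# Usage sketch (frames are hypothetical): assertFrameEqual(df[['b', 'a']], df[['a', 'b']])
# passes because check_like=True lets assert_frame_equal ignore column ordering.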
class TestFrameManager(unittest.TestCase):
"""These tests validate the data model methods"""
def setUp(self):
"Constructs a test environment if necessary"
self.df = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
# duplicate
self.df2 = frame_manager.pd.DataFrame([('Andrew', 31, 500), ('Optimus', 30, 1000), ('Iron Man', 51, 1250), ('Batman', 75, 50), ('Andrew', 31, 2500)], columns=['Name', 'Age', 'Points'])
self.df9 = frame_manager.pd.DataFrame([('Andrew', 31, 5), ('Optimus', 30, 10), ('Iron Man', 51, 12), ('Batman', 75, 11)], columns=['Name', 'age', 'Level'])
# Deadpool is villain aged 23... not listed
self.df3 = frame_manager.pd.DataFrame([(30, 'Autobot'), (51, 'Superhero'), (75, 'Superhero'), (23, 'Villain')], columns=['Age', 'Title'])
self.df_blank = frame_manager.pd.DataFrame()
self.df_mon_val = frame_manager.pd.DataFrame([('Jan', 5), ('Feb', 10), ('Mar', 15), ('Jan', 20), ('Feb', 25), ('Mar', 30)], columns = ['mon', 'val'])
self.df6 = frame_manager.pd.DataFrame([(30, 'Autobot', 2354, 0), (30, 'Decepticon', 18, 0), (51, 'Superhero', 234, 0), (75, 'Superhero', 897, 0), (23, 'Villain', 46546, 0)], columns=['Age', 'Title', 'DropMe', 'Points'])
# def test_get_frame_model_path(self):
# pass
# def test_get_frame_zone_path(self):
# pass
# def test_load_frame(self):
# pass
# def test_load_frame_meta(self):
# pass
# def test_clear_frame(self):
# pass
# def test_clear_zone_frame(self):
# pass
# def test_load_zone_frame(self):
# pass
# def test_load_zone_frame_meta(self):
# pass
# def test_save_frame(self):
# pass
# def test_get_tmp_frame_path(self):
# pass
# def test_compress_frame(self):
# pass
# def test_uncompress_frame(self):
# pass
# def test_append_frame(self):
# #x = frame_manager.append_frame(
# pass
def test_describe(self):
"""Tests to verify descriptive statistics about data frame"""
x = frame_manager.describe(self.df)
self.assertEqual(x['Age']['max'], max(self.df['Age']))
self.assertEqual(x['Points']['min'], min(self.df['Points']))
self.assertEqual(x['Age']['mean'], np.mean(self.df['Age']))
self.assertEqual(x['Points']['mean'], np.mean(self.df['Points']))
def test_count_unique(self):
"""Tests to verify count of distinct records in data frame"""
x = frame_manager.count_unique('Name', 'Points', self.df)
y = self.df.groupby('Name').count()['Age']['Andrew']
z = self.df.groupby('Name').count()['Age']['Iron Man']
self.assertEqual(x['Andrew'], y)
self.assertEqual(x['Iron Man'], z)
def test_sum(self):
"""Tests to verify sum of records in data frame"""
x = frame_manager.sum('Name', self.df)
y = self.df.groupby('Name').sum()
self.assertEqual(x['Points']['Andrew'], y['Points']['Andrew'])
self.assertEqual(x['Age']['Batman'], y['Age']['Batman'])
def test_std(self):
"""Tests to verify standard deviation of records in data frame"""
x = frame_manager.std('mon', self.df_mon_val)
y = self.df_mon_val.groupby('mon').std()
assertFrameEqual(x, y)
def test_mean(self):
"""Tests to verify mean of records in data frame"""
x = frame_manager.mean('Name', self.df)
y = self.df.groupby(['Name']).mean()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_count(self):
"""Tests to verify count of records in data frame"""
x = frame_manager.count('Name', self.df)
y = self.df.groupby('Name').count()
self.assertEqual(x['Points'][1], y['Points'][1])
def test_inner_join(self):
"""Tests to verify inner join capability"""
x = frame_manager.inner_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'inner', ['Age'])
assertFrameEqual(x, y)
def test_outer_join(self):
"""Tests to verify outer join capability"""
x = frame_manager.outer_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'outer', ['Age'])
assertFrameEqual(x, y)
def test_left_join(self):
"""Tests to verify left join capability"""
x = frame_manager.left_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'left', ['Age'])
assertFrameEqual(x, y)
def test_right_join(self):
"""Tests to verify right join capability"""
x = frame_manager.right_join(self.df, self.df3, ['Age'])
y = frame_manager.pd.merge(self.df, self.df3, 'right', ['Age'])
assertFrameEqual(x, y)
# def test_memoize(self):
# pass
# def test_geo_distance(self):
# pass
# def test_geo_location(self):
# pass
# def test_trailing_negative(self):
# pass
def test_now(self):
"""Tests to verify current time"""
x = frame_manager.now()
y = frame_manager.utc.timestamp()
self.assertEqual(x, y)
# def test_concat(self):
# df2 = self.df
# x = frame_manager.concat([self.df, df2], [self.df])
# print x
# def test_covariance(self):
# pass
# def test_correlation(self):
# pass
# def test_apply_agg(self):
# pass
# def test_distinct(self):
# pass
# def test_find_duplicates(self):
# pass
# def test_sort(self):
# pass
# def test_replace_column(self):
# pass
def test_replace(self):
"""Tests to verify replacement using dictionary key/value combinations"""
replace_dict = {'Optimus': 'Optimus Prime', 50: 5000}
x = frame_manager.replace(self.df, replace_dict)
y = self.df.replace(replace_dict)
assertFrameEqual(x, y)
# def test_reindex(self):
# pass
def test_rename_columns(self):
"""Tests to verify renamed columns using dictionary key/value combinations"""
rename_dict = {'Name': 'Title', 'Points': 'Salary'}
x = frame_manager.rename_columns(self.df, rename_dict)
y = self.df.rename(columns=rename_dict)
assertFrameEqual(x, y)
# def test_column_info(self):
# pass
@pytest.mark.skip('Dtypes seem to be wrong, should be passing sql types?')
def test_set_column_types(self):
"""Tests to verify data type conversion for columns"""
type_dict = {'Name': 's32', 'Points': 'float16', 'Age': 'int8'}
self.assertNotEqual('int8', self.df['Age'].dtypes)
self.assertNotEqual('float16', self.df['Points'].dtypes)
x = frame_manager.set_column_types(self.df, type_dict)
self.assertEqual('float32', x['Points'].dtypes)
self.assertEqual('int64', x['Age'].dtypes)
self.assertEqual('object', x['Name'].dtypes)
def test_drop_column(self):
"""Tests to verify columns dropped appropriately"""
x = frame_manager.drop_column(self.df, ['Age'])
y = self.df2
del y['Age']
assertFrameEqual(x, y)
def test_has_data(self):
"""Tests to verify a data frame does/doesn't have data"""
x = frame_manager.has_data(self.df_blank)
y = frame_manager.has_data(self.df)
self.assertFalse(x)
self.assertTrue(y)
# def test_in_column(self):
# pass
# def test_frame_source_reduce(self):
# """Tests to verify that data is filtered as expected (aka SQL Where)"""
# x = frame_manager.frame_source_reduce(self.df)
# assertFrameEqual(x, self.df2)
# def test_apply_variables(self):
# pass
# def test_frame_map_update(self):
# pass
# def test_get_entity_frame(self):
# pass
# def test_save_entity_frame(self):
# pass
def test_lookup(self):
"""Tests to verify lookup capability"""
# x = frame_manager.lookup(self.df, self.df6, ['Age'], None, ['Age', 'Title'])
orig_lookup = self.df6.copy()
w = frame_manager.lookup(self.df, self.df9, left_on=['Name', 'Age'], right_on=['Name', 'age'])
print(w)
x = frame_manager.lookup(self.df, self.df6, ['Age'])
y = frame_manager.distinct(self.df6, ['Age'])
z = frame_manager.left_join(self.df, y, ['Age'])
print(x)
print(z)
assertFrameEqual(x, z)
# ensure lookup frame integrity
assertFrameEqual(orig_lookup, self.df6)
def tearDown(self):
"Clean up any test structure or records generated during the testing"
del self.df
del self.df2
del self.df_blank
del self.df_mon_val
del self.df6
class TestCoalesce(unittest.TestCase):
def setUp(self):
self.reference_data = {
'A': [nan, 'aa', nan, nan, nan],
'B': ['b', 'bb', None, nan, 'bbbbb'],
'C': ['c', 'cc', 'ccc', 'cccc', 'ccccc'],
'D': ['d', '', nan, nan, nan],
'E': ['e', 'ee', nan, None, 7],
'one': [1, nan, nan, nan, nan], # float64
'two': [2, 2, 2.2, nan, 0], # float64
'three': [nan, nan, nan, 3, 3]
}
def test_string_columns(self):
"""Test the basic case with strings."""
df = pd.DataFrame(data=self.reference_data)
# Two columns
result = coalesce(df['A'], df['C'])
self.assertTrue(
(result == pd.Series(['c', 'aa', 'ccc', 'cccc', 'ccccc']))
.all()
)
# Three columns
result = coalesce(df['A'], df['D'], df['C'])
self.assertTrue(
(result == pd.Series(['d', 'aa', 'ccc', 'cccc', 'ccccc']))
.all()
)
# None is equivalent to NaN
result = coalesce(df['B'], df['C'])
self.assertTrue(
(result == pd.Series(['b', 'bb', 'ccc', 'cccc', 'bbbbb']))
.all()
)
def test_one_column(self):
"""Test that using one column is a no-op, returning no changes."""
df = pd.DataFrame(data=self.reference_data)
for c in df.columns:
col = df.loc[:, c]
result = coalesce(col)
self.assertTrue((result.fillna('nan') == col.fillna('nan')).all())
self.assertTrue((result.index == col.index).all())
def test_value_preservation(self):
"""Make sure valid values aren't overwritten by nulls."""
df = pd.DataFrame(data=self.reference_data)
result = coalesce(df['C'], df['A'])
self.assertTrue((result == df['C']).all())
def test_numeric_columns(self):
"""Test the basic case with numbers."""
df = pd.DataFrame(data=self.reference_data)
# Two columns
result = coalesce(df['one'], df['two'])
result = result.fillna('nan')
self.assertTrue(
(result == pd.Series([1., 2., 2.2, 'nan', 0.]))
.all()
)
# Three columns
result = coalesce(df['one'], df['two'], df['three'])
self.assertTrue(
(result == pd.Series([1., 2., 2.2, 3., 0.]))
.all()
)
def test_index_mismatch(self):
"""Indexes can be different as long as they're the same length.
The returned Series will have an index matching the first column's."""
df = pd.DataFrame(data=self.reference_data)
# Same-length columns with mismatched indexes compare just fine.
a = df.loc[:, 'A']
a.index = test_index = ['v', 'w', 'x', 'y', 'z']
result = coalesce(a, df['C'])
self.assertTrue(
(result.index == test_index)
.all()
)
self.assertTrue(
(result.index != df['C'].index)
.all()
)
self.assertTrue(
(result.values == pd.Series(['c', 'aa', 'ccc', 'cccc', 'ccccc']).values)
.all()
)
# Columns must be the same length, however.
too_short = | pd.Series(['foo', 'bar']) | pandas.Series |
from __future__ import division
from pdfminer.layout import LAParams
import pandas as pd
from pdfminer.pdfpage import PDFTextExtractionNotAllowed
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
import pdfminer
from pdfminer.pdfdevice import PDFDevice
from sklearn.cluster import KMeans
from os.path import basename
import re
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import resolve1
import os
import csv
import math  # math.fabs() is used below when merging nearby coordinates
def helper_anomaly(param):
pass
def auto_table_extract(example_file):
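# Sketch of the approach (summarising the code below): text boxes are collected together
# with their (x, y) positions, lines that share a y-coordinate are grouped into table rows,
# and KMeans clustering of the x-coordinates estimates the column positions on each page.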
all_tables = list()
#example_file = r"C:\Users\divesh.kubal\Downloads\H& B PDF\H& B PDF\16154.pdf"
#my_pass = '<PASSWORD>'
file = open(example_file, 'rb')
parser = PDFParser(file)
#document = PDFDocument(parser,password=<PASSWORD>)
document = PDFDocument(parser)
total_pages = resolve1(document.catalog['Pages'])['Count']
# print('page numbers: ', total_pages)
base_filename = basename(example_file)
bs = base_filename
#page_number1 = int(input('Enter Page Number: '))
#page_number = page_number1 - 1
#base_filename = base_filename.replace('.pdf','') + '_pg_' + str(page_number1)
f = open('math_log.txt', 'a', encoding='utf-8')
number_of_clusters_list = []
for page_number in range(0,total_pages):
base_filename = base_filename.replace('.pdf', '') + '_pg_' + str(page_number)
class pdfPositionHandling:
xo = list()
yo = list()
text = list()
def parse_obj(self, lt_objs):
# loop over the object list
for obj in lt_objs:
if isinstance(obj, pdfminer.layout.LTTextLine):
pdfPositionHandling.xo.append(int(obj.bbox[0]))
pdfPositionHandling.yo.append(int(obj.bbox[1]))
pdfPositionHandling.text.append(str(obj.get_text()))
math_log = str(obj.bbox[0]) + ' ' + str(obj.bbox[1]) + ' ' + str(obj.get_text().replace('\n', '_'))
f.write(math_log + '\n')
# if it's a textbox, also recurse
if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
self.parse_obj(obj._objs)
# if it's a container, recurse
elif isinstance(obj, pdfminer.layout.LTFigure):
self.parse_obj(obj._objs)
def parsepdf(self, filename, startpage, endpage):
# Open a PDF file.
fp = open(filename, 'rb')
# Create a PDF parser object associated with the file object.
parser = PDFParser(fp)
# Create a PDF document object that stores the document structure.
# Password for initialization as 2nd parameter
document = PDFDocument(parser)
# Check if the document allows text extraction. If not, abort.
if not document.is_extractable:
raise PDFTextExtractionNotAllowed
# Create a PDF resource manager object that stores shared resources.
rsrcmgr = PDFResourceManager()
# Create a PDF device object.
device = PDFDevice(rsrcmgr)
# BEGIN LAYOUT ANALYSIS
# Set parameters for analysis.
laparams = LAParams()
# Create a PDF page aggregator object.
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
# Create a PDF interpreter object.
interpreter = PDFPageInterpreter(rsrcmgr, device)
i = 0
# loop over all pages in the document
for page in PDFPage.create_pages(document):
if i >= startpage and i <= endpage:
# read the page into a layout object
interpreter.process_page(page)
layout = device.get_result()
# extract text from this object
self.parse_obj(layout._objs)
i += 1
def table_without_border():
obj = pdfPositionHandling()
obj.parsepdf(r'input_pdf.pdf', 0, 0)
y0 = pdfPositionHandling.yo
x0 = pdfPositionHandling.xo
text = pdfPositionHandling.text
from collections import defaultdict
def list_duplicates(seq):
tally = defaultdict(list)
for i, item in enumerate(seq):
tally[item].append(i)
return ((key, locs) for key, locs in tally.items())
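# Merge y-coordinates that differ by a single pixel so slightly offset text
# boxes are still grouped into the same table row.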
rep = list()
for each_elem in y0:
for each_elem2 in y0:
if (math.fabs(each_elem - each_elem2) == 1):
rep.append((each_elem, each_elem2))
for t in rep:
for n, i in enumerate(y0):
if i == t[0]:
y0[n] = t[1]
l = []
for dup in sorted(list_duplicates(y0), reverse=True):
l.append(dup)
table_df = pd.DataFrame([])
res_table = list()
final_table = list()
temp_text = ''
final_table2 = list()
for dup in sorted(list_duplicates(y0), reverse=True):
for each_dup in dup[1]:
text_append = str(text[each_dup]).replace('\n', '')
res_table.append(text_append)
final_table.append(res_table)
while ' ' in res_table:
res_table.remove(' ')
while ' ' in res_table:
res_table.remove(' ')
while ' ' in res_table:
res_table.remove(' ')
while '$' in res_table:
res_table.remove('$')
final_table2.append(res_table)
res_table = []
for each_row in final_table:
table_df = table_df.append(pd.Series(each_row), ignore_index=True)
s_xo = list(set(x0))
s_xo = sorted(s_xo)
for row in final_table2:
if len(row) == 1:
row.clear()
number_of_clusters = len(max(final_table2, key=len))
if number_of_clusters<18 and number_of_clusters>15:
number_of_clusters = 20
number_of_clusters_list.append(number_of_clusters)
# import math
if (int(math.fabs(number_of_clusters_list[0]-number_of_clusters))==1):
number_of_clusters = number_of_clusters_list[0]
#print(number_of_clusters)
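# Cluster the x-coordinates of the text boxes; each cluster centre approximates
# the horizontal position of one table column.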
import numpy as np
kmeans = KMeans(n_clusters=number_of_clusters)
arr = np.asarray(x0)
arr = arr.reshape(-1, 1)
kmeansoutput = kmeans.fit(arr)
centroids = kmeansoutput.cluster_centers_
new_centroids = list()
centroids = centroids.tolist()
for each_centroid in centroids:
each_centroid = int(each_centroid[0])
new_centroids.append(each_centroid)
new_centroids = sorted(new_centroids)
#new_centroids = [21, 42, 80, 150, 199, 278, 339, 406, 433, 460, 515, 551]
#number_of_clusters = number_of_clusters+1
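# Second pass: y-coordinates closer than 6 px are merged into one line before
# each text chunk is assigned to its nearest column centroid.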
rep = list()
for each_elem in y0:
for each_elem2 in y0:
if (math.fabs(each_elem - each_elem2) < 6): #Minimum Distance for new Line
rep.append((each_elem, each_elem2))
for t in rep:
for n, i in enumerate(y0):
if i == t[0]:
y0[n] = t[1]
l2 = list()
table_df = pd.DataFrame([])
res_table = list()
final_table = list()
for i in range(0, number_of_clusters):
res_table.append(' ')
l2.append(' ')
for dup in sorted(list_duplicates(y0), reverse=True):
for each_dup in dup[1]:
text_append = str(text[each_dup]).replace('\n', '')
text_append = text_append.strip()
text_append = re.sub(' +',' ',text_append)
cluster = min(range(len(new_centroids)), key=lambda i: abs(new_centroids[i] - x0[each_dup]))
# print('clusterr: ', text_append, cluster)
# print ('res: ', res_table)
leading_sp = len(text_append) - len(text_append.lstrip())
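# A long run of leading spaces suggests the chunk spans one or more empty cells; the
# 'my_pdf_dummy' sentinel below preserves that blank prefix when the chunk is split
# across columns (after the strip() above this branch rarely fires).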
if (leading_sp>5):
text_append = 'my_pdf_dummy' + ' '+text_append
text_append_split = text_append.split(' ')
text_append_split_res = []
for each_ss in text_append_split:
if each_ss!='':
each_ss = each_ss.replace('my_pdf_dummy',' ')
text_append_split_res.append(each_ss)
text_append = text_append.replace('my_pdf_dummy','')
# print('tsss: ', text_append_split_res)
if (res_table[cluster] != ' ' ):
# print ('tt: ', text_append)
# print ('tt: ', cluster)
app = str(res_table[cluster] + text_append)
res_table[cluster] = app
#elif(len(text_append_split_res)>1 and res_table[cluster] != ' '):
elif(len(text_append_split_res) > 1):
ap = cluster
for each_ss in text_append_split_res:
try:
res_table[ap]=each_ss
ap = ap+1
except:
res_table.insert(ap,each_ss)
ap = ap + 1
else:
res_table[cluster]=text_append
#res_table.insert(cluster, text_append)
for i in range(0, number_of_clusters):
res_table.append(' ')
if not all(' ' == s or s.isspace() for s in res_table):
final_table.append(res_table)
res_table = []
for i in range(0, number_of_clusters):
res_table.append(' ')
for each_row in final_table:
table_df = table_df.append( | pd.Series(each_row) | pandas.Series |
from copy import deepcopy
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyfolio
from finrl.marketdata.yahoodownloader import YahooDownloader
from pyfolio import timeseries
def get_daily_return(df, value_col_name="account_value"):
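# Compute simple daily percentage returns and return them as a Series indexed by
# UTC-localised dates (the format pyfolio's helpers expect).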
df = deepcopy(df)
df["daily_return"] = df[value_col_name].pct_change(1)
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True, drop=True)
df.index = df.index.tz_localize("UTC")
return | pd.Series(df["daily_return"], index=df.index) | pandas.Series |
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.models import User, auth
from django.contrib.auth import get_user_model
from django.db.models import Sum, Q, F
from django.contrib import messages
from django.views.generic import FormView
from rest_framework.views import APIView
from rest_framework.response import Response
from sqlalchemy import create_engine
from .mixins import AjaxFormMixin
from .forms import Item_Form, Day_Form, New_Item_Form
from .models import Item, New_item
from . import connectpsql
from .filters import ItemFilters
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from numpy import exp, array, random, dot
from sklearn.preprocessing import MinMaxScaler, scale
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras import models
from statsmodels.tsa.arima_model import ARIMA, ARMA
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.graphics.tsaplots import plot_pacf
from pmdarima.arima.utils import ndiffs
from TFANN import ANNR
from pandas.plotting import autocorrelation_plot
from flask import Flask, render_template
from django.views.decorators.csrf import csrf_exempt, csrf_protect
# point to CustomUser table
User = get_user_model()
# fill up the empty rows with zero
def insertZero(costList):
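# Resample the cost series to daily totals, then append zero-valued rows for every
# missing day up to yesterday so the series has no gaps.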
# daily total
dfDay = costList.cost.resample('D').sum()
today = datetime.datetime.today() #yyyy-mm-dd
last_date = dfDay.iloc[[-1]].index # find the last date of dfDay
# add zero until today
while last_date < today - datetime.timedelta(days=1):
last_date += datetime.timedelta(days=1) # add 1 day
new_row = pd.Series(data={" ": 0}, index=last_date) # create a new row
dfDay = dfDay.append(new_row, ignore_index=False) # insert into dfDay
dfDay = dfDay.replace(to_replace=np.nan, value=0)
return round(dfDay, 2)
# predicting
def processPrediction(dfDay, history, prediction_days):
last_date = dfDay.iloc[[-1]].index + datetime.timedelta(days=1)
## original days list
dfOrginal = | pd.DataFrame(columns=["date", "cost"]) | pandas.DataFrame |
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right's index, oddly
expected.index = result.index
# time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = pd.merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"incompatible merge keys \[0\] .*, must be the same type"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a": pd.Categorical(["a", "X", "c", "X", "b"]),
}
)
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == "left":
merge_asof(df_null, df, on="a")
else:
merge_asof(df, df_null, on="a")
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = pd.merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = pd.merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
tm.assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self):
# GH 27642
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
right["time"] = | pd.to_timedelta(right["time"], "ms") | pandas.to_timedelta |
import requests
import json
import pandas as pd
import datetime
import random
import argparse
def gen_request_url(base_url, domain, params_base, **params_other):
''' Generate a request URL for the OpenAQ API from the given parameters
params:
base_url : str
domain : str; domains used here include measurements, averages and locations
params_base : dict, base parameters to pass when querying the API
params_other : additional query parameters beyond those in params_base
returns:
request_url : str, the URL to request
'''
params_allowed_list = ['date_from', 'date_to', 'country', 'city', 'location', 'location_id', 'coordinates', 'parameter', 'limit', 'spatial', 'temporal']
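# Only whitelisted parameters end up in the query string; date_from/date_to values are
# parsed with pandas.to_datetime first. Hypothetical usage (values are examples, not
# taken from the source):
#   gen_request_url('https://api.openaq.org/v1/', 'measurements',
#                   {'country': 'US', 'limit': 100})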
try:
request_url = base_url + str(domain) + '?'
for k, v in params_base.items():
if k in params_allowed_list:
if k == 'date_from' or k == 'date_to':
v = | pd.to_datetime(v) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Build SAMI Data-Base
This script receives a path, a list of files or a file
containing a list of files. Then, it walks through them
and creates a data-base with information that can
be read in other ways.
by <NAME>
SOAR Telescope - 2016.10.01
"""
import argparse as _argparse
import glob as _glob
import logging as log
import os.path
import pandas as _pd
from astropy.io import fits as _pyfits
from samfp.old.globals import csv_columns
class DBBuilder:
def __init__(self, _input, debug=False, verbose=True):
self.set_verbose(verbose)
self.set_debug(debug)
self._input = _input
self.main(_input)
def main(self, _input):
self.print_header()
files = self.get_list_of_files(_input)
self.build_database(files)
@staticmethod
def build_database(files, database='temp.csv'):
"""
Let us finally build a data-base to store all the images and their
informations.
Parameters
----------
files : list
List of files that will be added to the data-base
database : str
The name of the file that will store the database information.
By default, this is set to 'temp.csv'
"""
files.sort()
df = | _pd.DataFrame(columns=csv_columns) | pandas.DataFrame |
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
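# Recursively compare two objects, dispatching to the matching pandas assertion
# helper based on their type.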
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
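# Round-trip `x` through to_msgpack/read_msgpack via a temporary file and return
# the decoded object.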
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.autograd import Variable
import torch.optim as optim
from torchvision import datasets, transforms
from models.wideresnet import *
from models.resnet import *
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from datetime import datetime
import pandas as pd
#python pgd_attack_cifar10.py --diff --source-model-path ./checkpoints/model_cifar_wrn.pt --target-model-path ./checkpoints/model-wideres-epoch20.pt
parser = argparse.ArgumentParser(description='PyTorch CIFAR PGD Attack Evaluation')
parser.add_argument('--test-batch-size', type=int, default=200, metavar='N',
help='input batch size for testing (default: 200)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031,
help='perturbation')
parser.add_argument('--num-steps', default=20,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.003,
help='perturb step size')
parser.add_argument('--random',
default=True,
help='random initialization for PGD')
parser.add_argument('--model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='model for white-box attack evaluation')
parser.add_argument('--source-model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='source model for black-box attack evaluation')
parser.add_argument('--target-model-path',
default='./checkpoints/model_cifar_wrn.pt',
help='target model for black-box attack evaluation')
parser.add_argument('--white-box-attack', action='store_true', default=False,
help='whether perform white-box attack')
parser.add_argument('--gen', action='store_true', default=False,
help='generate dataset')
parser.add_argument('--diff', action='store_true', default=False,
help='computing diff in activation')
parser.add_argument('--heat', action='store_true', default=False,
help='plot heat map')
parser.add_argument('--var', action='store_true', default=False,
help='plot variance graph')
parser.add_argument('--bar', action='store_true', default=False,
help='plot bar chart')
args = parser.parse_args()
# settings
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# set up data loader
transform_test = transforms.Compose([transforms.ToTensor(),])
testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
advset = torch.load("../data/cifar10_adv_black.pt")
adv_loader = torch.utils.data.DataLoader(advset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
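# PGD attack helpers: each one (optionally) starts from a random point inside the epsilon
# L-inf ball, then takes `num_steps` signed-gradient steps of size `step_size` on the
# cross-entropy loss, projecting the perturbation back into the epsilon ball and clamping
# the image to [0, 1] after every step. The white-box variant takes gradients through the
# target model itself; the black-box variants take them through a separate source model.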
def _pgd_whitebox(model,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
err_pgd = (model(X_pgd).data.max(1)[1] != y.data).float().sum()
print('err pgd (white-box): ', err_pgd)
return err, err_pgd
def _pgd_blackbox(model_target,
model_source,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model_target(X)
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(num_steps):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model_source(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
err_pgd = (model_target(X_pgd).data.max(1)[1] != y.data).float().sum()
print('err pgd black-box: ', err_pgd)
return err, err_pgd
def _pgd_blackbox_gen(model_target,
model_source,
X,
y,
epsilon=args.epsilon,
num_steps=args.num_steps,
step_size=args.step_size):
out = model_target(X)
print("X: " + str(X.data[0][0][0]))
err = (out.data.max(1)[1] != y.data).float().sum()
X_pgd = Variable(X.data, requires_grad=True)
if args.random:
random_noise = torch.FloatTensor(*X_pgd.shape).uniform_(-epsilon, epsilon).to(device)
X_pgd = Variable(X_pgd.data + random_noise, requires_grad=True)
for _ in range(int(num_steps)):
opt = optim.SGD([X_pgd], lr=1e-3)
opt.zero_grad()
with torch.enable_grad():
loss = nn.CrossEntropyLoss()(model_source(X_pgd), y)
loss.backward()
eta = step_size * X_pgd.grad.data.sign()
X_pgd = Variable(X_pgd.data + eta, requires_grad=True)
eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
X_pgd = Variable(X.data + eta, requires_grad=True)
print("X_pgd before clamp: " + str(X_pgd.data[0][0][0]))
X_pgd = Variable(torch.clamp(X_pgd, 0, 1.0), requires_grad=True)
print("X_pgd after clamp: " + str(X_pgd.data[0][0][0]))
return X_pgd
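# Unlike _pgd_blackbox (which returns error counts), _pgd_blackbox_gen returns the perturbed
# inputs themselves so the --gen branch below can save them as an adversarial dataset.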
def eval_adv_test_blackbox(model_target, model_source, device, test_loader):
"""
evaluate model by black-box attack
"""
model_target.eval()
model_source.eval()
robust_err_total = 0
natural_err_total = 0
if args.gen:
all_datasets = []
print("Start generating data")
for data, target in test_loader:
data, target = data.to(device), target.to(device)
# pgd attack
X, y = Variable(data, requires_grad=True), Variable(target)
X_pgd = _pgd_blackbox_gen(model_target, model_source, X, y)
all_datasets.append(torch.utils.data.TensorDataset(X_pgd, y))
# break
final_dataset = torch.utils.data.ConcatDataset(all_datasets)
print("final_dataset: " + str(final_dataset))
torch.save(final_dataset, "../data/cifar10_adv_black.pt")
elif args.diff:
print("start computing differences")
dateTimeObj = datetime.now()
date_name = str(dateTimeObj).replace(' ', '-').replace(':', '-').replace('.', '-')
# print("advset[0]: " + str(advset[0]))
# return
heat_dict = {}
row_max = 2000
row_count = 0
act_list_len = 0
print("len(advset): " + str(len(advset)))
for i in range(len(advset)):
d_adv, t_adv = advset[i][0].to(device), advset[i][1].to(device)
# pgd attack
X_adv, y_adv = Variable(d_adv), Variable(t_adv)
out_adv, act_list_adv = model_target.forward_act(X_adv.unsqueeze(0))
corr_adv = out_adv.data.max(1)[1] == y_adv.data
# print("y test: " + str(target_test))
if not corr_adv:
d_test, t_test = testset[i][0].to(device), torch.tensor(testset[i][1]).to(device)
# pgd attack
X_test, y_test = Variable(d_test), Variable(t_test)
out_test, act_list_test = model_target.forward_act(X_test.unsqueeze(0))
corr_test = out_test.data.max(1)[1] == y_test.data
if corr_test:
print("now in row: " + str(row_count))
act_list_len = len(act_list_test)
for j in range(act_list_len):
sub = torch.absolute(torch.subtract(act_list_test[j],act_list_adv[j]))
flatten_np = sub.cpu().data.numpy().flatten()
if not j in heat_dict:
heat_dict[j] = [flatten_np]
else:
heat_dict[j].append(flatten_np)
row_count = row_count + 1
if row_count == row_max:
if args.heat:
for j in range(act_list_len):
fig, ax = plt.subplots()
# plt.imshow(np.reshape(flatten_np, (-1, len(flatten_np))), cmap='hot', interpolation='nearest')
ax = sns.heatmap(heat_dict[j])
plt.savefig("fig/cifar_10_" + str(j) + "," + str(row_max) + "," + date_name + ".pdf")
if args.var:
for j in range(1):
# for j in range(act_list_len):
print("plotting "+ str(j))
df = pd.DataFrame(heat_dict[j])
import io
import math
import numpy as np
import os
import pandas as pd
import random
import sys
import typing
import tempfile
from pathlib import Path
from collections import defaultdict
from io import StringIO
from kgtk.cli.text_embedding import main as main_embedding_function
from scipy.spatial.distance import cosine, euclidean
from tl.utility.utility import Utility
from tl.candidate_generation.es_search import Search
from tl.exceptions import TLException
class EmbeddingVector:
"""
a class supporting embedding-vector ranking operations
"""
def __init__(self, parameters):
self.vectors_map = {}
self.sentence_map = {}
self.kwargs = parameters
self.loaded_file = None
self.kgtk_format_input = None
self.centroid = {}
self.groups = defaultdict(set)
self.es = Search(self.kwargs["url"], self.kwargs["index"],
es_user=self.kwargs.get("user"), es_pass=self.kwargs.get("password"))
def load_input_file(self, input_file):
"""
read the input file
"""
self.loaded_file = pd.read_csv(input_file, dtype=object)
self._to_kgtk_test_format()
def _to_kgtk_test_format(self):
"""
wrap input file to kgtk format input
:return:
"""
# remove evaluation label equals to 0 (which means no ground truth)
self.groups = defaultdict(set)
if "evaluation_label" in self.loaded_file.columns:
self.loaded_file = self.loaded_file[self.loaded_file['evaluation_label'] != '0']
all_info = {}
count = 0
correspond_key = {"label_clean": "label", "kg_id": "candidates", "GT_kg_id": "kg_id"}
for i, each_part in self.loaded_file.groupby(["column", "row"]):
info = {}
for each_choice in correspond_key.keys():
if each_choice in each_part.columns:
temp = list(set(each_part[each_choice].unique()))
temp_filtered = []
for each in temp:
if each != "" and not isinstance(each, float):
temp_filtered.append(each)
info[correspond_key[each_choice]] = temp_filtered
else:
info[correspond_key[each_choice]] = []
if len(info['kg_id']) > 1 or len(info['label']) > 1:
Utility.eprint("WARNING: pair {} has multiple ground truths?".format(i))
self.groups[i[0]].update(info["candidates"])
self.groups[i[0]].update(info["kg_id"])
info["label"] = info["label"][0]
if len(info["kg_id"]) > 0:
info["kg_id"] = info["kg_id"][0]
else:
info["kg_id"] = " "
info["candidates"] = "|".join(info["candidates"])
all_info[count] = info
count += 1
self.kgtk_format_input = pd.DataFrame.from_dict(all_info, orient='index')
def process_vectors(self):
"""
apply corresponding vector strategy to process the calculated vectors
:return:
"""
vector_strategy = self.kwargs.get("column_vector_strategy", "exact-matches")
if vector_strategy == "page-rank":
self._calculate_page_rank()
elif vector_strategy == "page-rank-precomputed":
self._get_precomputed_page_rank()
else:
self._get_centroid(vector_strategy)
def _generate_graph(self):
"""
build the per-column similarity graphs used to calculate page rank
:return:
"""
Utility.eprint("start calculating page rank, it may take some time.")
import networkx as nx
# calculate probability of moving to the next stage
# calculate probability based on columns
col_memo = {}
nodes_memo = {}
graph_memo = {}
similarity_memo = {}
for col_number, each_part in self.loaded_file.groupby(["column"]):
# first compute and cache all pairwise similarities for this column
all_nodes = set(each_part['kg_id']) - {"", np.nan}
all_nodes_list = list(all_nodes)
for i, each_node in enumerate(all_nodes):
col_memo[each_node] = col_number
for i in range(len(all_nodes_list)):
for j in range(i + 1, len(all_nodes_list)):
similarity = self.compute_distance(self.vectors_map[all_nodes_list[i]], self.vectors_map[all_nodes_list[j]])
similarity_memo[(all_nodes_list[i], all_nodes_list[j])] = similarity
similarity_memo[(all_nodes_list[j], all_nodes_list[i])] = similarity
similarity_graph = nx.DiGraph()
similarity_graph.add_nodes_from(all_nodes)
graph_memo[col_number] = similarity_graph
nodes_memo[col_number] = all_nodes
for i, each_row in self.kgtk_format_input.iterrows():
each_surface = each_row["candidates"].split("|")
if len(each_surface) > 0:
for each_node_i in each_surface:
if each_node_i == "":
continue
col_number = col_memo[each_node_i]
all_nodes_set = nodes_memo[col_number]
remained_nodes = all_nodes_set - set(each_surface)
# calculate sum score first
sum_score = 0
for each_node_j in remained_nodes:
sum_score += similarity_memo[(each_node_i, each_node_j)]
for each_node_j in remained_nodes:
# pos = (pos_memo[each_node_i], pos_memo[each_node_j])
each_weight = similarity_memo[(each_node_i, each_node_j)] / sum_score
graph_memo[col_number].add_edge(each_node_i, each_node_j, weight=each_weight)
return graph_memo
def _calculate_page_rank(self):
import networkx as nx
# just get initial page rank to do filtering
weights_original = {}
graph_memo = self._generate_graph()
for each_graph in graph_memo.values():
weights_original.update(dict(each_graph.degree(weight='weight')))
self.loaded_file['|pr|'] = self.loaded_file['kg_id'].map(weights_original)
from tl.features.normalize_scores import drop_by_score
self.loaded_file = drop_by_score(column="|pr|", df=self.loaded_file, k=20)
# also we need to update kgtk format input
self._to_kgtk_test_format()
# create the graph again base on filtered result
res = {}
graph_memo = self._generate_graph()
# it seems pagerank_numpy runs quickest
for each_graph in graph_memo.values():
res.update(nx.pagerank_numpy(each_graph, alpha=0.9))
self.loaded_file['|pr|'] = self.loaded_file['kg_id'].map(res)
def _get_precomputed_page_rank(self):
"""
get the precomputed pagerank from whole wikidata graph
:return:
"""
pageranks = {k: v[0] if len(v) > 0 else 0
for k, v in self.es.search_node_pagerank(self.loaded_file['kg_id'].dropna().unique().tolist()).items()}
self.loaded_file["|pr|"] = self.loaded_file['kg_id'].map(pageranks).fillna(0)
def _get_centroid(self, vector_strategy: str):
"""
function used to calculate the column-vector(centroid) value
"""
n_value = int(self.kwargs.pop("n_value"))
if vector_strategy == "ground-truth":
if "GT_kg_id" not in self.loaded_file:
raise TLException(
"The input file does not have `GT_kg_id` column! Can't run with ground-truth "
"strategy")
candidate_nodes = list(set(self.loaded_file["GT_kg_id"].tolist()))
elif vector_strategy == "exact-matches":
candidate_nodes = list(set(self.loaded_file["kg_id"].tolist()))
else:
raise TLException("Unknown vector vector strategy {}".format(vector_strategy))
candidate_nodes = [each for each in candidate_nodes if each != "" and each is not np.nan]
# get corresponding column of each candidate nodes
nodes_map = defaultdict(set)
for each_node in candidate_nodes:
for group, nodes in self.groups.items():
if each_node in nodes:
nodes_map[group].add(each_node)
# random sample nodes if needed
nodes_map_updated = {}
for group, nodes in nodes_map.items():
if n_value != 0 and n_value < len(nodes):
nodes_map_updated[group] = random.sample(nodes, n_value)
else:
nodes_map_updated[group] = nodes
# get centroid for each column
for group, nodes in nodes_map_updated.items():
temp = []
for each_node in sorted(list(nodes)):
temp.append(self.vectors_map[each_node])
each_centroid = np.mean(np.array(temp), axis=0)
self.centroid[group] = each_centroid
def compute_distance(self, v1: typing.List[float], v2: typing.List[float]):
if self.kwargs["distance_function"] == "cosine":
val = 1 - cosine(v1, v2)
elif self.kwargs["distance_function"] == "euclidean":
val = euclidean(v1, v2)
# because we need higher scores to be better, we use the reciprocal value here
if val == 0:
val = float("inf")
else:
val = 1 / val
else:
raise TLException("Unknown distance function {}".format(self.kwargs["distance_function"]))
return val
def add_score_column(self):
score_column_name = self.kwargs["output_column_name"]
if score_column_name is None:
score_column_name = "score_{}".format(self.kwargs["column_vector_strategy"])
i = 1
while score_column_name in self.loaded_file:
i += 1
score_column_name = "score_{}_{}".format(self.kwargs["column_vector_strategy"], i)
if self.kwargs["column_vector_strategy"] in {"page-rank", "page-rank-precomputed"}:
self.loaded_file = self.loaded_file.rename(columns={'|pr|': score_column_name})
else:
scores = []
for i, each_row in self.loaded_file.iterrows():
# the nan value can also be float
if (isinstance(each_row["kg_id"], float) and math.isnan(each_row["kg_id"])) or each_row["kg_id"] is np.nan:
each_score = ""
else:
each_score = self.compute_distance(self.centroid[each_row["column"]],
self.vectors_map[each_row["kg_id"]])
scores.append(each_score)
self.loaded_file[score_column_name] = scores
if self.kwargs["save_embedding_feature"]:
self.loaded_file['sentence'] = self.loaded_file['kg_id'].map(self.sentence_map)
self.loaded_file['vector'] = self.loaded_file['kg_id'].map(self.vectors_map)
if self.kwargs["ignore_empty_sentences"]:
# remove sentences which are the same as the kg ids
self.loaded_file = self.loaded_file[
self.loaded_file['kg_id'] != self.loaded_file['sentence'].apply(
lambda x: x[:-1] if isinstance(x, str) else x)
]
def _create_detail_has_properties(self):
"""
Load the predicate counts and labels file (if present), removing unnecessary entries and extracting details where needed
:return: None
"""
model_file_path = os.path.join(repr(__file__).replace("'", "").replace("/text_embedding.py", ""),
"predicate_counts_and_labels.tsv")
if os.path.exists(model_file_path):
properties_df = pd.read_csv(model_file_path, sep='\t')
#!/bin/env python
#
# Script name: IDP_html_gen.py
#
# Description: Script to generate IDP page of QC html report.
#
## Author: <NAME>
import pandas as pd
import numpy as np
import sys
import os
from ast import literal_eval
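# Render a value in scientific notation when it is numeric; non-numeric values are returned
# unchanged.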
def formatter(x):
try:
return "{:e}".format(float(x))
except:
return x
def generate_full_IDPoi_data(df, IDP_dir):
"""Function that adds IDP values to an existing IDP dataframe, using the
relevant IDP txt from the subject's IDP directory. Each IDP txt file
corresponds with a IDP category.
Parameters
----------
df : pd.DataFrame
Dataframe containing details about IDPs, no values present.
IDP_dir : string
Full path to the directory containing the subject's IDP output
txt files.
Returns
----------
output : pd.DataFrame
Dataframe containing details about IDPs, with values included
"""
flag = False
#output df placeholder
output = pd.DataFrame(
columns=[
"num","short","category","num_in_cat","long","unit","dtype","description","value"
],
)
#for each IDP category, access its corresponding IDP value file
for category in df["category"].unique():
#sub-df containing only IDPs for this category
df_sub = df[df["category"] == category]
#open the category's IDP value txt file, clean whitespace, and split into a df
cat_data = []
try:
with open(IDP_dir + category + ".txt") as my_file:
for line in my_file:
line = line.strip()
line = line.split(" ")
cat_data.append(line)
cat_data = pd.DataFrame(cat_data)
cat_data = cat_data.T
cat_data.columns = ["value"]
cat_data["num_in_cat"] = cat_data.index + 1
cat_data = cat_data.astype({"num_in_cat": int})
df_sub = df_sub.astype({"num_in_cat": int})
#inner join the category's IDP values with the sub-df for this category
df_sub = df_sub.merge(cat_data, how="left", on="num_in_cat")
#left should work - used to be inner join. now left join to show if there are any missing IDP values
#if this is the first sub-df, then the output df is the same as the sub-df for now
#otherwise, append sub-df ot output df
if not flag:
output = df_sub
flag = True
else:
output = output.append(df_sub, ignore_index=True)
except:
print(IDP_dir + category + ".txt file missing")
return output
def IDP_postprocessing(subj, IDP_list_path, IDPoi_list_path, thresholds_txt):
"""Function that generates the IDP page of the QC report for a
subject.
TODO: remove duplicate code by having a single function used
twice - once for low-priority and once for high
TODO: handle missing low-priority lines in the IDPoi
TODO: separate low and high priority IDPs in report
Parameters
----------
subj : string
Full path to subject's directory.
IDP_list_path : string
Full path to IDP list (file containing IDP information).
IDPoi_list_path : string
Full path to IDPoi list (file containing list of IDPs of interest).
"""
#remove trailing forward slashes in subject paths
if subj.endswith("/"):
subj = subj[:-1]
QC_dir = subj + "/QC/html/"
IDP_dir = subj + "/IDP_files/"
if not os.path.exists(IDP_dir):
os.makedirs(IDP_dir)
subjName = subj[subj.rfind("/") + 1 :]
#reading and cleaning each line of IDP list
IDP_list = []
with open(IDP_list_path) as my_file:
for line in my_file:
line = line.strip()
IDP_list.append(line)
i = 0
#remove header
IDP_list = IDP_list[1:]
#cleaning just in case - should run fine even if data is already clean
while i < len(IDP_list):
IDP_list[i] = IDP_list[i].replace('"', "")
IDP_list[i] = " ".join(IDP_list[i].split())
IDP_list[i] = " ".join(IDP_list[i].split("\\t"))
IDP_list[i] = IDP_list[i].split(" ", 7)
i += 1
#IDP list dataframe containing details about every IDP
IDP = pd.DataFrame(
IDP_list,
columns=[
"num",
"short",
"category",
"num_in_cat",
"long",
"unit",
"dtype",
"description",
],
)
#reading each line of IDPoi list
IDPoi_list = []
with open(IDPoi_list_path) as my_file:
for line in my_file:
line = line.strip()
IDPoi_list.append(line)
IDPoi = np.array(IDPoi_list)
#splitting IDPois into high and low priority dataframes
priority = True
priority_array = []
non_priority_array = []
for line in IDPoi:
if line == "HIGH_PRIORITY":
priority = True
elif line == "LOW_PRIORITY":
priority = False
else:
if priority:
priority_array.append(line)
else:
non_priority_array.append(line)
priority_df = pd.DataFrame(priority_array, columns=["short"])
non_priority_df = pd.DataFrame(non_priority_array, columns=["short"])
#filling details about the IDPs for both priority and non priority IDPoi dfs
#by merging the dfs with the IDP list dataframe
#TODO handle IDPois that don't exist or are mistyped
priority_df = priority_df[["short"]]
priority_df = priority_df.merge(IDP, how="inner", left_on="short", right_on="short")
non_priority_df = non_priority_df[["short"]]
non_priority_df = non_priority_df.merge(
IDP, how="inner", left_on="short", right_on="short"
)
#get values for each IDPoi
#TODO: handle empty IDP values
priority_output = generate_full_IDPoi_data(priority_df, IDP_dir)
non_priority_output = generate_full_IDPoi_data(non_priority_df, IDP_dir)
#retain only the columns needed for the compiled output
priority_output=priority_output[["num","short","category","num_in_cat","long","unit","dtype","description","value"]]
non_priority_output=non_priority_output[["num","short","category","num_in_cat","long","unit","dtype","description","value"]]
new_IDP_output = pd.read_csv(r"" + IDP_dir + "tvb_new_IDPs.tsv", delimiter = "\t")
#new_IDP_output=new_IDP_output[["num","short","category","num_in_cat","long","unit","dtype","description","value"]]
#prior, non prior, new tvb IDP compiled output
last_IDP_num=int(IDP['num'].iloc[-1])
new_IDP_output['num'] += last_IDP_num
compiled_IDPs = pd.concat([priority_output,new_IDP_output])
import pandas as pd
import numpy as np
import os
# Generate the risk distribution parameters from the risk_distribution.py script
from risk_distribution import *
# Import parameters from parameters.py script
from parameters import *
# Set path for saving dataframes
base_path = '...'
sims = 10000
# Functions to return probabilistic variables in suitable format
def gamma(alpha, beta):
alpha = np.array([alpha] * sims)
beta = np.array([beta] * sims)
samples = np.random.gamma(alpha, beta)
return samples
def gamma_specified(min, multiplier, alpha, beta):
min = np.array([min] * sims).T
alpha = np.array([alpha] * sims)
beta = np.array([beta] * sims)
samples = min + np.random.gamma(alpha, beta) * multiplier
samples = samples.T
return samples
def normal(parameter, sd):
samples = np.random.normal(parameter, sd, sims)
samples = np.array([samples] * 45).T
return samples
def lognormal(parameter, sd):
samples = np.random.lognormal(parameter, sd, sims)
samples = np.array([samples] * 45).T
return samples
def beta(parameter, se):
alpha = np.array([parameter * ((parameter*(1-parameter))/(se**2)-1)] * sims)
beta = (alpha/parameter) - alpha
samples = np.random.beta(alpha, beta)
samples = samples.T
return samples
# Function to deliver PSA simulation matrix for variables not being varied
def psa_function(var):
return np.array([var] * sims)
# Function to generate outcomes
def outcomes(parameter):
# Simulations - one total value per simulation
sims = np.sum(parameter, axis=1)
# Mean value across all simulations
mean = np.mean(parameter, axis=0)
# Total value (mean and sum across all simulations)
total = np.sum(mean)
return sims, mean, total
##############
# Parameters #
##############
# Costs
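# Each cost item is drawn once per simulation from a Gamma(shape, scale) distribution
# (numpy's parameterisation), giving `sims` probabilistic values per cost.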
cost_psa = gamma(33.9,0.3)
cost_psa = np.tile(cost_psa, (45,1)).T # Extend cost_psa to be a matrix of length 45 x sims
cost_prs = gamma(33.9,0.7)
cost_biopsy = gamma(33.9,11.5)
cost_biopsy = np.tile(cost_biopsy, (45,1)).T
cost_refuse_biopsy = gamma(33.9,3.1)
cost_refuse_biopsy = np.tile(cost_refuse_biopsy, (45,1)).T
cost_assessment = gamma(33.9,22.7)
cost_as = gamma(33.9,128.1)
cost_rp = gamma(33.9,241.2)
cost_rt = gamma(33.9,158.9)
cost_brachytherapy = gamma(33.9,45.1)
cost_adt = gamma(33.9,16.5)
cost_chemo = gamma(33.9,219.2)
cost_rt_chemo = cost_rt + cost_chemo
cost_rp_rt = cost_rp + cost_rt
cost_rp_chemo = cost_rp + cost_chemo
cost_rp_rt_chemo = cost_rp + cost_rt + cost_chemo
costs_local = np.stack((cost_chemo, cost_rp,
cost_rt, cost_rt_chemo,
cost_rp_chemo, cost_rp_rt,
cost_rp_rt_chemo, cost_as,
cost_adt, cost_brachytherapy), axis=-1)
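# The column order of costs_local is assumed to line up with the treatment-mix matrices
# (tx_local / tx_adv, provided by the star imports above) that it is multiplied with below.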
costs_adv = np.array(costs_local, copy=True)
# Incident costs / treatment dataframe
tx_costs_local = costs_local * tx_local
tx_costs_adv = costs_adv * tx_adv
pca_death_costs = gamma(1.8,3854.9)
# Utilities
pca_incidence_utility_psa = gamma_specified((pca_incidence_utility-0.05), 0.2, 5, 0.05)
utility_background_psa = gamma_specified((utility_background-0.03), 0.167, 4, 0.06)
# Relative risk of death in screened cohort
rr_death_screening = lognormal(-0.2357, 0.0724)
# Proportion of cancers at risk of overdiagnosis
p_overdiagnosis_psa = beta(p_overdiagnosis, 0.001)
additional_years = psa_function(np.repeat(0,20))
p_overdiagnosis_psa = np.concatenate((p_overdiagnosis_psa, additional_years.T))
p_overdiagnosis_psa[0:10,:] = 0
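# The first 10 rows (model ages 45-54, before screening is assumed to start at age 55) and
# the 20 appended years carry no overdiagnosis.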
# Relative risk incidence of advanced cancer (stages III and IV)
rr_adv_screening = lognormal(-0.1625, 0.0829)
rr_adv_screening[:,0:10] = 0
rr_adv_screening[:,25:] = 0
# The relative increase in cancers detected if screened
p_increase_df = pd.read_csv('data/p_increase_df.csv', index_col='age')
[RR_INCIDENCE_SC_55, RR_INCIDENCE_SC_56,
RR_INCIDENCE_SC_57, RR_INCIDENCE_SC_58,
RR_INCIDENCE_SC_59, RR_INCIDENCE_SC_60,
RR_INCIDENCE_SC_61, RR_INCIDENCE_SC_62,
RR_INCIDENCE_SC_63, RR_INCIDENCE_SC_64,
RR_INCIDENCE_SC_65, RR_INCIDENCE_SC_66,
RR_INCIDENCE_SC_67, RR_INCIDENCE_SC_68,
RR_INCIDENCE_SC_69] = [np.random.lognormal(p_increase_df.loc[i, '1.23_log'],
p_increase_df.loc[i, 'se'],
sims)
for i in np.arange(55,70,1)]
rr_incidence = np.vstack((np.array([np.repeat(1,sims)]*10),
RR_INCIDENCE_SC_55, RR_INCIDENCE_SC_56, RR_INCIDENCE_SC_57,
RR_INCIDENCE_SC_58, RR_INCIDENCE_SC_59, RR_INCIDENCE_SC_60,
RR_INCIDENCE_SC_61, RR_INCIDENCE_SC_62, RR_INCIDENCE_SC_63,
RR_INCIDENCE_SC_64, RR_INCIDENCE_SC_65, RR_INCIDENCE_SC_66,
RR_INCIDENCE_SC_67, RR_INCIDENCE_SC_68, RR_INCIDENCE_SC_69))
rr_incidence[rr_incidence < 1] = 1.03 # truncate
# Drop in incidence in the year after screening stops
post_sc_incidence_drop = 0.9
# Number of biopsies per cancer detected
# Proportion having biopsy (screened arms)
p_suspected = normal(0.24,0.05)
p_suspected_refuse_biopsy = normal(0.24,0.05)
# Proportion having biopsy (non-screened arms)
# (201/567) - Ahmed et al. 2017, Table S6 (doi: 10.1016/S0140-6736(16)32401-1)
p_suspected_ns = normal((201/567),0.05)
p_suspected_refuse_biopsy_ns = normal((201/567),0.05)
n_psa_tests = normal(1.2,0.05)
# Relative cost increase if clinically detected
# Source: Pharoah et al. 2013
relative_cost_clinically_detected = normal(1.1,0.04)
# Create a function to append the results to the relevant lists
def gen_list_outcomes(parameter_list, parameter):
parameter_list.append(parameter)
return parameter_list
# Run through each AR threshold in turn:
reference_absolute_risk = np.round(np.arange(0.02,0.105,0.005),3)
for reference_value in reference_absolute_risk:
a_risk = pd.read_csv(base_path+(str(np.round(reference_value*100,2)))+'/a_risk_'+(str(np.round(reference_value*100,2)))+'.csv').set_index('age')
# Generate lists to store the variables
(s_qalys_discount_ns_list, s_cost_discount_ns_list, s_pca_deaths_ns_list,
ns_cohort_list, outcomes_ns_psa_list,
s_qalys_discount_age_list, s_cost_discount_age_list,
s_pca_deaths_age_list, s_overdiagnosis_age_list,
age_cohort_list, outcomes_age_psa_list,
s_qalys_discount_prs_list, s_cost_discount_prs_list,
s_pca_deaths_prs_list, s_overdiagnosis_prs_list,
prs_cohort_list, outcomes_prs_psa_list) = [[] for _ in range(17)]
parameter_list_ns = [s_qalys_discount_ns_list, s_cost_discount_ns_list, s_pca_deaths_ns_list,
ns_cohort_list, outcomes_ns_psa_list]
parameter_list_age = [s_qalys_discount_age_list, s_cost_discount_age_list,
s_pca_deaths_age_list, s_overdiagnosis_age_list,
age_cohort_list, outcomes_age_psa_list]
parameter_list_prs = [s_qalys_discount_prs_list, s_cost_discount_prs_list,
s_pca_deaths_prs_list, s_overdiagnosis_prs_list,
prs_cohort_list, outcomes_prs_psa_list]
# Loop through years 45-69 to build cohorts
for year in (a_risk.index[0:25]):
################################################
# Non-screening Cohort #
################################################
#################################
# Transition rates - no screening
#################################
tr_incidence = psa_function(pca_incidence[year-45:])
tr_pca_death_baseline = psa_function(pca_death_baseline[year-45:])
tr_death_other_causes = psa_function(death_other_causes[year-45:])
psa_stage_local = psa_function(stage_local[year-45:])
psa_stage_adv = psa_function(stage_adv[year-45:])
# Year 1 in the model
#####################
age = np.arange(year,90)
length_df = len(age)
# Cohorts, numbers 'healthy', and incident cases
cohort = np.array([np.repeat(pop[year], length_df)] * sims)
pca_alive = np.array([np.zeros(length_df)] * sims)
healthy = cohort - pca_alive
pca_incidence_ns_cohort = healthy * tr_incidence
# Deaths
pca_death = ((pca_alive * tr_pca_death_baseline)
+ (healthy * tr_pca_death_baseline))
pca_death_other = ((pca_incidence_ns_cohort
+ pca_alive
- pca_death)
* tr_death_other_causes)
healthy_death_other = ((healthy - pca_incidence_ns_cohort)
* tr_death_other_causes)
total_death = (pca_death
+ pca_death_other
+ healthy_death_other)
# Prevalent cases & life-years
pca_prevalence_ns = (pca_incidence_ns_cohort
- pca_death
- pca_death_other)
lyrs_pca_nodiscount = pca_prevalence_ns * 0.5
# Treatment costs
costs_tx = np.array([np.zeros(length_df)] * sims)
costs_tx[:,0] = ((pca_incidence_ns_cohort[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns_cohort[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0]) # this variable is tiled to reach 45 - each level is the same
# Year 2 onwards
################
total_cycles = length_df
for i in range(1, total_cycles):
# Cohorts, numbers 'healthy', and incident cases
cohort[:,i] = cohort[:,i-1] - total_death[:,i-1]
pca_alive[:,i] = (pca_alive[:,i-1]
+ pca_incidence_ns_cohort[:,i-1]
- pca_death[:,i-1]
- pca_death_other[:,i-1]) # PCa alive at the beginning of the year
healthy[:,i] = (cohort[:,i] - pca_alive[:,i])
pca_incidence_ns_cohort[:,i] = healthy[:,i] * tr_incidence[:,i]
# Deaths
pca_death[:,i] = ((pca_alive[:,i] * tr_pca_death_baseline[:,i])
+ (healthy[:,i] * tr_pca_death_baseline[:,i]))
pca_death_other[:,i] = ((pca_incidence_ns_cohort[:,i]
+ pca_alive[:,i]
- pca_death[:,i])
* tr_death_other_causes[:,i])
healthy_death_other[:,i] = ((healthy[:,i] - pca_incidence_ns_cohort[:,i])
* tr_death_other_causes[:,i])
total_death[:,i] = (pca_death[:,i]
+ pca_death_other[:,i]
+ healthy_death_other[:,i])
# Prevalent cases & life-years
pca_prevalence_ns[:,i] = (pca_incidence_ns_cohort[:,i]
+ pca_alive[:,i]
- pca_death[:,i]
- pca_death_other[:,i])
lyrs_pca_nodiscount[:,i] = ((pca_prevalence_ns[:,i-1]
+ pca_prevalence_ns[:,i])
* 0.5)
# Costs
costs_tx[:,i] = ((pca_incidence_ns_cohort[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns_cohort[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
############
# Outcomes #
############
# INDEX:
# s_ = sim (this is the sum across the simulations i.e. one total value per simulation)
# m_ = mean (this is the mean across the simulations i.e. one value for each year of the model)
# t_ = total
# nodiscount = not discounted
# discount = discounted
# _ns = outcomes for the no screening cohort
# Total incident cases
######################
s_cases_ns, m_cases_ns, t_cases_ns = outcomes(pca_incidence_ns_cohort)
# PCa alive
s_pca_alive_ns, m_pca_alive_ns, t_pca_alive_ns = outcomes(pca_alive)
# Healthy
s_healthy_ns, m_healthy_ns, t_healthy_ns = outcomes(healthy)
# Deaths from other causes amongst prostate cancer cases
s_pca_deaths_other_ns, m_pca_deaths_other_ns, t_pca_deaths_other_ns = outcomes(pca_death_other)
# Deaths from other causes amongst the healthy
(s_healthy_deaths_other_ns,
m_healthy_deaths_other_ns,
t_healthy_deaths_other_ns) = outcomes(healthy_death_other)
# Total deaths from other causes
################################
deaths_other_ns = pca_death_other + healthy_death_other
s_deaths_other_ns, m_deaths_other_ns, t_deaths_other_ns = outcomes(deaths_other_ns)
# Total deaths from prostate cancer
###################################
s_deaths_pca_ns, m_deaths_pca_ns, t_deaths_pca_ns = outcomes(pca_death)
# Life-years ('healthy')
lyrs_healthy_nodiscount_ns = healthy-(0.5 * (healthy_death_other + pca_incidence_ns_cohort))
(s_lyrs_healthy_nodiscount_ns,
m_lyrs_healthy_nodiscount_ns,
t_lyrs_healthy_nodiscount_ns) = outcomes(lyrs_healthy_nodiscount_ns)
lyrs_healthy_discount_ns = lyrs_healthy_nodiscount_ns * discount_factor[:total_cycles]
(s_lyrs_healthy_discount_ns,
m_lyrs_healthy_discount_ns,
t_lyrs_healthy_discount_ns) = outcomes(lyrs_healthy_discount_ns)
# Life-years with prostate cancer
lyrs_pca_discount_ns = lyrs_pca_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_discount_ns,
m_lyrs_pca_discount_ns,
t_lyrs_pca_discount_ns) = outcomes(lyrs_pca_discount_ns)
# Total life-years
##################
lyrs_nodiscount_ns = lyrs_healthy_nodiscount_ns + lyrs_pca_nodiscount
(s_lyrs_nodiscount_ns,
m_lyrs_nodiscount_ns,
t_lyrs_nodiscount_ns) = outcomes(lyrs_nodiscount_ns)
lyrs_discount_ns = lyrs_healthy_discount_ns + lyrs_pca_discount_ns
(s_lyrs_discount_ns,
m_lyrs_discount_ns,
t_lyrs_discount_ns) = outcomes(lyrs_discount_ns)
# QALYs in the healthy
qalys_healthy_nodiscount_ns = lyrs_healthy_nodiscount_ns * utility_background_psa[:,year-45:]
qalys_healthy_discount_ns = lyrs_healthy_discount_ns * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_ns,
m_qalys_healthy_discount_ns,
t_qalys_healthy_discount_ns) = outcomes(qalys_healthy_discount_ns)
# QALYs with prostate cancer
qalys_pca_nodiscount_ns = lyrs_pca_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_ns = lyrs_pca_discount_ns * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_ns,
m_qalys_pca_discount_ns,
t_qalys_pca_discount_ns) = outcomes(qalys_pca_discount_ns)
# Total QALYs
#############
qalys_nodiscount_ns = qalys_healthy_nodiscount_ns + qalys_pca_nodiscount_ns
(s_qalys_nodiscount_ns,
m_qalys_nodiscount_ns,
t_qalys_nodiscount_ns) = outcomes(qalys_nodiscount_ns)
qalys_discount_ns = qalys_healthy_discount_ns + qalys_pca_discount_ns
(s_qalys_discount_ns,
m_qalys_discount_ns,
t_qalys_discount_ns) = outcomes(qalys_discount_ns)
# Cost of PSA testing
n_psa_tests_ns = ((pca_incidence_ns_cohort / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns_cohort * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])) * n_psa_tests[:,year-45:]
(s_n_psa_tests_ns,
m_n_psa_tests_ns,
total_n_psa_tests_ns) = outcomes(n_psa_tests_ns)
cost_psa_testing_nodiscount_ns = n_psa_tests_ns * cost_psa[:,year-45:] * relative_cost_clinically_detected[:,year-45:]
(s_cost_psa_testing_nodiscount_ns,
m_cost_psa_testing_nodiscount_ns,
t_cost_psa_testing_nodiscount_ns) = outcomes(cost_psa_testing_nodiscount_ns)
cost_psa_testing_discount_ns = cost_psa_testing_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_psa_testing_discount_ns,
m_cost_psa_testing_discount_ns,
t_cost_psa_testing_discount_ns) = outcomes(cost_psa_testing_discount_ns)
# Cost of suspected cancer - biopsies
n_biopsies_ns = pca_incidence_ns_cohort / p_suspected_ns[:,year-45:]
(s_n_biopsies_ns,
m_n_biopsies_ns,
total_n_biopsies_ns) = outcomes(n_biopsies_ns)
cost_biopsy_nodiscount_ns = (((pca_incidence_ns_cohort / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns_cohort * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_nodiscount_ns,
m_cost_biopsy_nodiscount_ns,
t_cost_biopsy_nodiscount_ns) = outcomes(cost_biopsy_nodiscount_ns)
cost_biopsy_discount_ns = cost_biopsy_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_biopsy_discount_ns,
m_cost_biopsy_discount_ns,
t_cost_biopsy_discount_ns) = outcomes(cost_biopsy_discount_ns)
# Cost of staging
cost_staging_nodiscount_ns = (cost_assessment
* psa_stage_adv.T
* pca_incidence_ns_cohort.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_nodiscount_ns,
m_cost_staging_nodiscount_ns,
t_cost_staging_nodiscount_ns) = outcomes(cost_staging_nodiscount_ns)
cost_staging_discount_ns = cost_staging_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_staging_discount_ns,
m_cost_staging_discount_ns,
t_cost_staging_discount_ns) = outcomes(cost_staging_discount_ns)
# Cost in last 12 months of life
cost_eol_nodiscount_ns = (pca_death_costs * pca_death.T).T
(s_cost_eol_nodiscount_ns,
m_cost_eol_nodiscount_ns,
t_cost_eol_nodiscount_ns) = outcomes(cost_eol_nodiscount_ns)
cost_eol_discount_ns = cost_eol_nodiscount_ns * discount_factor[:total_cycles]
(s_cost_eol_discount_ns,
m_cost_eol_discount_ns,
t_cost_eol_discount_ns) = outcomes(cost_eol_discount_ns)
# Costs of treatment
(s_cost_tx_nodiscount_ns,
m_cost_tx_nodiscount_ns,
t_cost_tx_nodiscount_ns) = outcomes(costs_tx)
cost_tx_discount_ns = costs_tx * discount_factor[:total_cycles]
(s_cost_tx_discount_ns,
m_cost_tx_discount_ns,
t_cost_tx_discount_ns) = outcomes(cost_tx_discount_ns)
# Amalgamated costs
cost_nodiscount_ns = (cost_psa_testing_nodiscount_ns
+ cost_biopsy_nodiscount_ns
+ cost_staging_nodiscount_ns
+ costs_tx
+ cost_eol_nodiscount_ns)
(s_cost_nodiscount_ns,
m_cost_nodiscount_ns,
t_cost_nodiscount_ns) = outcomes(cost_nodiscount_ns)
cost_discount_ns = (cost_psa_testing_discount_ns
+ cost_biopsy_discount_ns
+ cost_staging_discount_ns
+ cost_tx_discount_ns
+ cost_eol_discount_ns)
(s_cost_discount_ns,
m_cost_discount_ns,
t_cost_discount_ns) = outcomes(cost_discount_ns)
# Generate a mean dataframe
ns_matrix = [age, m_cases_ns, m_deaths_other_ns, m_deaths_pca_ns,
m_pca_alive_ns, m_healthy_ns, m_lyrs_healthy_nodiscount_ns,
m_lyrs_healthy_discount_ns, m_lyrs_pca_discount_ns, m_lyrs_discount_ns,
m_qalys_healthy_discount_ns, m_qalys_pca_discount_ns, m_qalys_discount_ns,
m_cost_psa_testing_discount_ns, m_cost_biopsy_discount_ns, m_cost_staging_discount_ns,
m_cost_tx_discount_ns, m_cost_eol_discount_ns, m_cost_discount_ns]
ns_columns = ['age', 'pca_cases', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy', 'lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
ns_cohort = pd.DataFrame(ns_matrix, index = ns_columns).T
t_parameters_ns = [year, t_cases_ns, t_deaths_pca_ns,
t_deaths_other_ns,
t_lyrs_healthy_discount_ns, t_lyrs_pca_discount_ns,
t_lyrs_nodiscount_ns, t_lyrs_discount_ns,
t_qalys_healthy_discount_ns, t_qalys_pca_discount_ns,
t_qalys_nodiscount_ns, t_qalys_discount_ns,
t_cost_psa_testing_nodiscount_ns, t_cost_psa_testing_discount_ns,
t_cost_biopsy_nodiscount_ns, t_cost_biopsy_discount_ns,
t_cost_staging_nodiscount_ns, t_cost_staging_discount_ns,
t_cost_eol_nodiscount_ns, t_cost_eol_discount_ns,
t_cost_tx_nodiscount_ns, t_cost_tx_discount_ns,
t_cost_nodiscount_ns, t_cost_discount_ns,
total_n_psa_tests_ns, total_n_biopsies_ns]
columns_ns = ['cohort_age_at_start', 'pca_cases',
'pca_deaths', 'deaths_other_causes', 'lyrs_healthy_discounted',
'lyrs_pca_discounted', 'lyrs_undiscounted', 'lyrs_discounted',
'qalys_healthy_discounted', 'qalys_pca_discounted',
'qalys_undiscounted', 'qalys_discounted',
'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted',
'cost_treatment_undiscounted', 'cost_treatment_discounted',
'costs_undiscounted', 'costs_discounted', 'n_psa_tests', 'n_biopsies']
outcomes_ns_psa = pd.DataFrame(t_parameters_ns, index = columns_ns).T
outcomes_ns_psa['overdiagnosis'] = 0
parameters_ns = [s_qalys_discount_ns, s_cost_discount_ns, s_deaths_pca_ns,
ns_cohort, outcomes_ns_psa]
for index, parameter in enumerate(parameter_list_ns):
parameter = gen_list_outcomes(parameter_list_ns[index], parameters_ns[index])
#######################
# Age-based screening #
#######################
###################################
# Specific transition probabilities
###################################
if year < 55:
# Yearly probability of PCa incidence
smoothed_pca_incidence_age = psa_function(pca_incidence[year-45:])
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality_age = psa_function(pca_death_baseline[year-45:])
# Proportion of cancers detected by screening at an advanced stage
stage_screened_adv = psa_function(stage_adv)
psa_stage_screened_adv = stage_screened_adv[:,year-45:]
# Proportion of cancers detected by screening at a localised stage
stage_screened_local = 1-stage_screened_adv
psa_stage_screened_local = stage_screened_local[:,year-45:]
if year > 54:
# Yearly probability of PCa incidence
smoothed_pca_incidence = psa_function(pca_incidence)
smoothed_pca_incidence[:,10:25] = (smoothed_pca_incidence[:,10:25].T * rr_incidence[year-45,:]).T
smoothed_pca_incidence[:,25:35] = (smoothed_pca_incidence[:,25:35] * np.linspace(post_sc_incidence_drop,1,10))
smoothed_pca_incidence_age = smoothed_pca_incidence[:,year-45:]
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality = psa_function(pca_death_baseline)
smoothed_pca_mortality[:,10:15] = smoothed_pca_mortality[:,10:15] * np.linspace(1,0.79,5)
smoothed_pca_mortality[:,15:] = smoothed_pca_mortality[:,15:] * rr_death_screening[:,15:]
smoothed_pca_mortality_age = smoothed_pca_mortality[:,year-45:]
# Proportion of cancers detected by screening at a localised / advanced stage
stage_screened_adv = stage_adv * rr_adv_screening
stage_screened_local = 1-stage_screened_adv
psa_stage_screened_local = stage_screened_local[:,year-45:]
psa_stage_screened_adv = stage_screened_adv[:,year-45:]
#######################
# Year 1 in the model #
#######################
age = np.arange(year,90)
length_df = len(age)
length_screen = len(np.arange(year,70)) # number of screening years depending on age cohort starting
# Cohorts, numbers healthy, and incident cases
cohort_sc = np.array([np.repeat(pop[year], length_df)] * sims) * uptake_psa
cohort_ns = np.array([np.repeat(pop[year], length_df)] * sims) * (1-uptake_psa)
pca_alive_sc = np.array([np.zeros(length_df)] * sims)
pca_alive_ns = np.array([np.zeros(length_df)] * sims)
healthy_sc = cohort_sc - pca_alive_sc
healthy_ns = cohort_ns - pca_alive_ns
pca_incidence_sc = healthy_sc * smoothed_pca_incidence_age # Total incidence in screened arm
if year > 54:
pca_incidence_screened = pca_incidence_sc.copy()
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims) # Post-screening cancers - 0 until model reaches age 70.
elif year < 55:
pca_incidence_screened = np.array([np.zeros(length_df)] * sims)
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims) # post-screening cancers 0 as no screening (needed for later code to run smoothly)
pca_incidence_ns = healthy_ns * tr_incidence # Incidence in non-screened
# Deaths
pca_death_sc = ((pca_alive_sc * smoothed_pca_mortality_age)
+ (healthy_sc * smoothed_pca_mortality_age))
pca_death_ns = ((pca_alive_ns * tr_pca_death_baseline)
+ (healthy_ns * tr_pca_death_baseline))
pca_death_other_sc = ((pca_incidence_sc
+ pca_alive_sc
- pca_death_sc)
* tr_death_other_causes)
pca_death_other_ns = ((pca_incidence_ns
+ pca_alive_ns
- pca_death_ns)
* tr_death_other_causes)
healthy_death_other_sc = ((healthy_sc - pca_incidence_sc)
* tr_death_other_causes)
healthy_death_other_ns = ((healthy_ns - pca_incidence_ns)
* tr_death_other_causes)
t_death_sc = (pca_death_sc
+ pca_death_other_sc
+ healthy_death_other_sc) # Total deaths screened arm
t_death_ns = (pca_death_ns
+ pca_death_other_ns
+ healthy_death_other_ns) # Total deaths non-screened arm
t_death = t_death_sc + t_death_ns # Total deaths
# Prevalent cases & life-years
pca_prevalence_sc = (pca_incidence_sc
- pca_death_sc
- pca_death_other_sc)
pca_prevalence_ns = (pca_incidence_ns
- pca_death_ns
- pca_death_other_ns)
lyrs_pca_sc_nodiscount = pca_prevalence_sc * 0.5
lyrs_pca_ns_nodiscount = pca_prevalence_ns * 0.5
# Costs
if year > 54:
costs_tx_screened = np.array([np.zeros(length_df)] * sims)
costs_tx_post_screening = np.array([np.zeros(length_df)] * sims)
costs_tx_screened[:,0] = ((pca_incidence_screened[:,0]
* psa_stage_screened_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,0]
* psa_stage_screened_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,0] = ((pca_incidence_post_screening[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_sc[:,0] = (costs_tx_screened[:,0] + costs_tx_post_screening[:,0]) # total cost in screened arms
elif year < 55:
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_sc[:,0] = ((pca_incidence_sc[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_ns = np.array([np.zeros(length_df)] * sims)
costs_tx_ns[:,0] = ((pca_incidence_ns[:,0]
* psa_stage_local[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,0]
* psa_stage_adv[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
##################
# Year 2 onwards #
##################
total_cycles = length_df
for i in range(1, total_cycles):
# Cohorts, numbers healthy, incident & prevalent cases
cohort_sc[:,i] = cohort_sc[:,i-1] - t_death_sc[:,i-1]
cohort_ns[:,i] = cohort_ns[:,i-1] - t_death_ns[:,i-1]
pca_alive_sc[:,i] = (pca_alive_sc[:,i-1]
+ pca_incidence_sc[:,i-1]
- pca_death_sc[:,i-1]
- pca_death_other_sc[:,i-1])
pca_alive_ns[:,i] = (pca_alive_ns[:,i-1]
+ pca_incidence_ns[:,i-1]
- pca_death_ns[:,i-1]
- pca_death_other_ns[:,i-1])
healthy_sc[:,i] = (cohort_sc[:,i] - pca_alive_sc[:,i])
healthy_ns[:,i] = (cohort_ns[:,i] - pca_alive_ns[:,i])
pca_incidence_sc[:,i] = healthy_sc[:,i] * smoothed_pca_incidence_age[:,i]
if year > 54:
if i < length_screen:
pca_incidence_screened[:,i] = pca_incidence_sc[:,i].copy() # Screen-detected cancers
pca_incidence_post_screening[:,i] = 0
else:
pca_incidence_screened[:,i] = 0 # Screen-detected cancers
pca_incidence_post_screening[:,i] = pca_incidence_sc[:,i].copy()
elif year < 55:
pca_incidence_screened[:,i] = 0 # Screen-detected cancers
pca_incidence_post_screening[:,i] = 0 # post-screening cancers 0 as no screening (needed for later code to run smoothly)
pca_incidence_ns[:,i] = healthy_ns[:,i] * tr_incidence[:,i]
# Deaths
pca_death_sc[:,i] = ((pca_alive_sc[:,i] * smoothed_pca_mortality_age[:,i])
+ (healthy_sc[:,i] * smoothed_pca_mortality_age[:,i]))
pca_death_ns[:,i] = ((pca_alive_ns[:,i] * tr_pca_death_baseline[:,i])
+ (healthy_ns[:,i] * tr_pca_death_baseline[:,i]))
pca_death_other_sc[:,i] = ((pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i])
* tr_death_other_causes[:,i])
pca_death_other_ns[:,i] = ((pca_incidence_ns[:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_sc[:,i] = ((healthy_sc[:,i] - pca_incidence_sc[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_ns[:,i] = ((healthy_ns[:,i] - pca_incidence_ns[:,i])
* tr_death_other_causes[:,i])
t_death_sc[:,i] = (pca_death_sc[:,i]
+ pca_death_other_sc[:,i]
+ healthy_death_other_sc[:,i])
t_death_ns[:,i] = (pca_death_ns[:,i]
+ pca_death_other_ns[:,i]
+ healthy_death_other_ns[:,i])
t_death[:,i] = t_death_sc[:,i] + t_death_ns[:,i]
# Prevalent cases & life-years
pca_prevalence_sc[:,i] = (pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i]
- pca_death_other_sc[:,i])
pca_prevalence_ns[:,i] = (pca_incidence_ns [:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i]
- pca_death_other_ns[:,i])
lyrs_pca_sc_nodiscount[:,i] = ((pca_prevalence_sc[:,i-1]
+ pca_prevalence_sc[:,i])
* 0.5)
lyrs_pca_ns_nodiscount[:,i] = ((pca_prevalence_ns[:,i-1]
+ pca_prevalence_ns[:,i])
* 0.5)
# Costs
if year > 54:
costs_tx_screened[:,i] = ((pca_incidence_screened[:,i]
* psa_stage_screened_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,i]
* psa_stage_screened_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,i] = ((pca_incidence_post_screening[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_sc[:,i] = (costs_tx_screened[:,i]
+ costs_tx_post_screening[:,i]) # total cost in screened arms
elif year < 55:
costs_tx_sc[:,i] = ((pca_incidence_sc[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_ns[:,i] = ((pca_incidence_ns[:,i]
* psa_stage_local[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,i]
* psa_stage_adv[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
############
# Outcomes #
############
# INDEX:
# s_ = sim (this is the sum across the simulations i.e. one total value per simulation)
# m_ = mean (this is the mean across the simulations i.e. one value for each year of the model)
# t_ = total
# nodiscount = not discounted
# discount = discounted
# _age = outcomes for the age-based screening cohort
# Total incident cases (screened arm)
s_cases_sc_age, m_cases_sc_age, t_cases_sc_age = outcomes(pca_incidence_sc)
# Total screen-detected cancers (screened arm)
s_cases_sc_detected_age, m_cases_sc_detected_age, t_cases_sc_detected_age = outcomes(pca_incidence_screened)
# Total cancers detected after screening stops (screened arm)
s_cases_post_screening_age, m_cases_post_screening_age, t_cases_post_screening_age = outcomes(pca_incidence_post_screening)
# Incident cases (non-screened arm)
s_cases_ns_age, m_cases_ns_age, t_cases_ns_age = outcomes(pca_incidence_ns)
# Incident cases (total)
########################
s_cases_age = s_cases_sc_age + s_cases_ns_age
m_cases_age = m_cases_sc_age + m_cases_ns_age
t_cases_age = t_cases_sc_age + t_cases_ns_age
# PCa alive
s_pca_alive_age, m_pca_alive_age, t_pca_alive_age = outcomes((pca_alive_sc + pca_alive_ns))
# Healthy
s_healthy_age, m_healthy_age, t_healthy_age = outcomes((healthy_sc + healthy_ns))
# Overdiagnosed cases
overdiagnosis_age = pca_incidence_screened * p_overdiagnosis_psa.T[:,year-45:]
s_overdiagnosis_age, m_overdiagnosis_age, t_overdiagnosis_age = outcomes(overdiagnosis_age)
# Deaths from other causes (screened arm)
deaths_sc_other_age = pca_death_other_sc + healthy_death_other_sc
s_deaths_sc_other_age, m_deaths_sc_other_age, t_deaths_sc_other_age = outcomes(deaths_sc_other_age)
# Deaths from other causes (non-screened arm)
deaths_ns_other_age = pca_death_other_ns + healthy_death_other_ns
s_deaths_ns_other_age, m_deaths_ns_other_age, t_deaths_ns_other_age = outcomes(deaths_ns_other_age)
# Deaths from other causes (total)
s_deaths_other_age = s_deaths_sc_other_age + s_deaths_ns_other_age
m_deaths_other_age = m_deaths_sc_other_age + m_deaths_ns_other_age
t_deaths_other_age = t_deaths_sc_other_age + t_deaths_ns_other_age
    # Deaths from prostate cancer (screened arm)
s_deaths_sc_pca_age, m_deaths_sc_pca_age, t_deaths_sc_pca_age = outcomes(pca_death_sc)
    # Deaths from prostate cancer (non-screened arm)
s_deaths_ns_pca_age, m_deaths_ns_pca_age, t_deaths_ns_pca_age = outcomes(pca_death_ns)
    # Deaths from prostate cancer (total)
####################################
s_deaths_pca_age = s_deaths_sc_pca_age + s_deaths_ns_pca_age
m_deaths_pca_age = m_deaths_sc_pca_age + m_deaths_ns_pca_age
t_deaths_pca_age = t_deaths_sc_pca_age + t_deaths_ns_pca_age
# Healthy life-years (screened arm)
lyrs_healthy_sc_nodiscount_age = (healthy_sc
- (0.5 * (healthy_death_other_sc+pca_incidence_sc)))
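    # Men diagnosed with PCa or dying of other causes during the year are assumed to contribute,
    # on average, half a year in the healthy state (hence the 0.5 weighting above).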
lyrs_healthy_sc_discount_age = lyrs_healthy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_sc_discount_age,
m_lyrs_healthy_sc_discount_age,
t_lyrs_healthy_sc_discount_age) = outcomes(lyrs_healthy_sc_discount_age)
# Healthy life-years (non-screened arm)
lyrs_healthy_ns_nodiscount_age = (healthy_ns
- (0.5 * (healthy_death_other_ns+pca_incidence_ns)))
lyrs_healthy_ns_discount_age = lyrs_healthy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_ns_discount_age,
m_lyrs_healthy_ns_discount_age,
t_lyrs_healthy_ns_discount_age) = outcomes(lyrs_healthy_ns_discount_age)
# Total healthy life-years
lyrs_healthy_nodiscount_age = lyrs_healthy_sc_nodiscount_age + lyrs_healthy_ns_nodiscount_age
(s_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_nodiscount_age,
t_lyrs_healthy_nodiscount_age) = outcomes(lyrs_healthy_nodiscount_age)
lyrs_healthy_discount_age = lyrs_healthy_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_discount_age,
m_lyrs_healthy_discount_age,
t_lyrs_healthy_discount_age) = outcomes(lyrs_healthy_discount_age)
# Life-years with prostate cancer in screened arm
lyrs_pca_sc_discount = lyrs_pca_sc_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_sc_discount_age,
m_lyrs_pca_sc_discount_age,
t_lyrs_pca_sc_discount_age) = outcomes(lyrs_pca_sc_discount)
# Life-years with prostate cancer in non-screened arm
lyrs_pca_ns_discount = lyrs_pca_ns_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_ns_discount_age,
m_lyrs_pca_ns_discount_age,
t_lyrs_pca_ns_age) = outcomes(lyrs_pca_ns_discount)
# Life-years with prostate cancer in both arms
lyrs_pca_nodiscount_age = lyrs_pca_sc_nodiscount + lyrs_pca_ns_nodiscount
lyrs_pca_discount_age = lyrs_pca_sc_discount + lyrs_pca_ns_discount
(s_lyrs_pca_discount_age,
m_lyrs_pca_discount_age,
t_lyrs_pca_discount_age) = outcomes(lyrs_pca_discount_age)
# Total life-years
##################
lyrs_nodiscount_age = lyrs_healthy_nodiscount_age + lyrs_pca_nodiscount_age
(s_lyrs_nodiscount_age,
m_lyrs_nodiscount_age,
t_lyrs_nodiscount_age) = outcomes(lyrs_nodiscount_age)
lyrs_discount_age = lyrs_healthy_discount_age + lyrs_pca_discount_age
(s_lyrs_discount_age,
m_lyrs_discount_age,
t_lyrs_discount_age) = outcomes(lyrs_discount_age)
# QALYs (healthy life) - screened arm
qalys_healthy_sc_nodiscount_age = lyrs_healthy_sc_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_sc_discount_age = lyrs_healthy_sc_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_sc_discount_age,
m_qalys_healthy_sc_discount_age,
t_qalys_healthy_sc_discount_age) = outcomes(qalys_healthy_sc_discount_age)
# QALYs (healthy life) - non-screened arm
qalys_healthy_ns_nodiscount_age = lyrs_healthy_ns_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_ns_discount_age = lyrs_healthy_ns_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_ns_discount_age,
m_qalys_healthy_ns_discount_age,
t_qalys_healthy_ns_discount_age) = outcomes(qalys_healthy_ns_discount_age)
# Total QALYs (healthy life)
qalys_healthy_nodiscount_age = lyrs_healthy_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_discount_age = lyrs_healthy_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_age,
m_qalys_healthy_discount_age,
t_qalys_healthy_discount_age) = outcomes(qalys_healthy_discount_age)
# QALYS with prostate cancer - screened arm
qalys_pca_sc_nodiscount_age = lyrs_pca_sc_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_sc_discount_age = lyrs_pca_sc_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_sc_discount_age,
m_qalys_pca_sc_discount_age,
t_qalys_pca_sc_discount_age) = outcomes(qalys_pca_sc_discount_age)
# QALYS with prostate cancer - non-screened arm
qalys_pca_ns_nodiscount_age = lyrs_pca_ns_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_ns_discount_age = lyrs_pca_ns_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_ns_discount_age,
m_qalys_pca_ns_discount_age,
t_qalys_pca_ns_discount_age) = outcomes(qalys_pca_ns_discount_age)
# Total QALYS with prostate cancer
qalys_pca_nodiscount_age = lyrs_pca_nodiscount_age * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_age = lyrs_pca_discount_age * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_age,
m_qalys_pca_discount_age,
t_qalys_pca_discount_age) = outcomes(qalys_pca_discount_age)
# Total QALYs
#############
qalys_nodiscount_age = qalys_healthy_nodiscount_age + qalys_pca_nodiscount_age
(s_qalys_nodiscount_age,
m_qalys_nodiscount_age,
t_qalys_nodiscount_age) = outcomes(qalys_nodiscount_age)
qalys_discount_age = qalys_healthy_discount_age + qalys_pca_discount_age
(s_qalys_discount_age,
m_qalys_discount_age,
t_qalys_discount_age) = outcomes(qalys_discount_age)
# Costs of PSA testing in non-screened arm
n_psa_tests_ns_age = ((pca_incidence_ns / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])) * n_psa_tests[:,year-45:]
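    # The bracketed term back-calculates the number of men investigated (those biopsied plus those
    # who decline biopsy) from the incident cases; multiplying by n_psa_tests converts those
    # investigations into the number of PSA tests performed.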
cost_psa_testing_ns_nodiscount_age = n_psa_tests_ns_age * cost_psa[:,year-45:] * relative_cost_clinically_detected[:,year-45:]
(s_cost_psa_testing_ns_nodiscount_age,
m_cost_psa_testing_ns_nodiscount_age,
t_cost_psa_testing_ns_nodiscount_age) = outcomes(cost_psa_testing_ns_nodiscount_age)
cost_psa_testing_ns_discount_age = cost_psa_testing_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_ns_discount_age,
m_cost_psa_testing_ns_discount_age,
t_cost_psa_testing_ns_discount_age) = outcomes(cost_psa_testing_ns_discount_age)
# Costs of PSA testing in screened arm (PSA screening every four years)
# PSA tests during screened and non-screened period
if year < 55:
# Assuming all cancers are clinically detected as these cohorts
# are not eligible for screening (hence p_suspected_ns)
        # This uses (1 - uptake_biopsy) because the bracketed part of the equation works out
        # the number of biopsies (including refused biopsies), which is then multiplied by
        # n_psa_tests to get the number of PSA tests
n_psa_tests_sc_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
+ ((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (n_psa_tests_sc_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Get the screened years
lyrs_healthy_screened_nodiscount_age = np.array([np.zeros(length_df)] * sims)
lyrs_healthy_screened_nodiscount_age[:,:length_screen] = lyrs_healthy_sc_nodiscount_age[:,:length_screen].copy()
lyrs_healthy_screened_nodiscount_age[:,length_screen:] = 0
# Population-level PSA testing during screening phase
n_psa_tests_screened_age = lyrs_healthy_screened_nodiscount_age * uptake_psa / 4
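        # Dividing by 4 reflects the four-yearly screening interval: roughly one PSA test per
        # four healthy, screen-attending person-years.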
# Assuming all cancers are clinically detected in the post-screening phase
n_psa_tests_post_screening_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
+ ((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
# Total PSA tests
n_psa_tests_sc_age = (n_psa_tests_screened_age + n_psa_tests_post_screening_age)
cost_psa_testing_screened_age = n_psa_tests_screened_age * cost_psa[:,year-45:]
cost_psa_testing_post_screening_age = (n_psa_tests_post_screening_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (cost_psa_testing_screened_age
+ cost_psa_testing_post_screening_age)
(s_cost_psa_testing_sc_nodiscount_age,
m_cost_psa_testing_sc_nodiscount_age,
t_cost_psa_testing_sc_nodiscount_age) = outcomes(cost_psa_testing_sc_nodiscount_age)
cost_psa_testing_sc_discount_age = cost_psa_testing_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_sc_discount_age,
m_cost_psa_testing_sc_discount_age,
t_cost_psa_testing_sc_discount_age) = outcomes(cost_psa_testing_sc_discount_age)
# Total costs of PSA testing
############################
n_psa_tests_age = n_psa_tests_ns_age + n_psa_tests_sc_age
(s_n_psa_tests_age,
m_n_psa_tests_age,
total_n_psa_tests_age) = outcomes(n_psa_tests_age)
cost_psa_testing_nodiscount_age = cost_psa_testing_ns_nodiscount_age + cost_psa_testing_sc_nodiscount_age
(s_cost_psa_testing_nodiscount_age,
m_cost_psa_testing_nodiscount_age,
t_cost_psa_testing_nodiscount_age) = outcomes(cost_psa_testing_nodiscount_age)
cost_psa_testing_discount_age = cost_psa_testing_ns_discount_age + cost_psa_testing_sc_discount_age
(s_cost_psa_testing_discount_age,
m_cost_psa_testing_discount_age,
t_cost_psa_testing_discount_age) = outcomes(cost_psa_testing_discount_age)
# Costs of biopsy - screened arm
if year < 55:
# Assuming all cancers are clinically detected as these cohorts
# are not eligible for screening (hence p_suspected_ns)
n_biopsies_sc_age = pca_incidence_sc / p_suspected_ns[:,year-45:]
# Costs include the costs of those who turn down biopsy
cost_biopsy_sc_nodiscount_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Screen-detected cancers
n_biopsies_screened_age = pca_incidence_screened / p_suspected[:,year-45:]
cost_biopsy_screened_nodiscount_age = (((pca_incidence_screened / p_suspected[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_screened * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy[:,year-45:])
* cost_refuse_biopsy[:,year-45:]))
# Assuming all cancers are clinically detected in the post-screening phase
n_biopsies_post_screening_age = pca_incidence_post_screening / p_suspected_ns[:,year-45:]
cost_biopsies_post_screening_nodiscount_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
# Total biopsies
n_biopsies_sc_age = (n_biopsies_screened_age + n_biopsies_post_screening_age)
# Total cost of biopsies
cost_biopsy_sc_nodiscount_age = (cost_biopsy_screened_nodiscount_age
+ cost_biopsies_post_screening_nodiscount_age)
(s_cost_biopsy_sc_nodiscount_age,
m_cost_biopsy_sc_nodiscount_age,
t_cost_biopsy_sc_nodiscount_age) = outcomes(cost_biopsy_sc_nodiscount_age)
cost_biopsy_sc_discount_age = cost_biopsy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_sc_discount_age,
m_cost_biopsy_sc_discount_age,
t_cost_biopsy_sc_discount_age) = outcomes(cost_biopsy_sc_discount_age)
# Costs of biopsy - non-screened arm
n_biopsies_ns_age = pca_incidence_ns / p_suspected_ns[:,year-45:]
cost_biopsy_ns_nodiscount_age = (((pca_incidence_ns / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_ns_nodiscount_age,
m_cost_biopsy_ns_nodiscount_age,
t_cost_biopsy_ns_nodiscount_age) = outcomes(cost_biopsy_ns_nodiscount_age)
cost_biopsy_ns_discount_age = cost_biopsy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_ns_discount_age,
m_cost_biopsy_ns_discount_age,
t_cost_biopsy_ns_discount_age) = outcomes(cost_biopsy_ns_discount_age)
# Total costs of biopsy
#######################
n_biopsies_age = n_biopsies_sc_age + n_biopsies_ns_age
(s_n_biopsies_age,
m_n_biopsies_age,
total_n_biopsies_age) = outcomes(n_biopsies_age)
cost_biopsy_nodiscount_age = cost_biopsy_sc_nodiscount_age + cost_biopsy_ns_nodiscount_age
(s_cost_biopsy_nodiscount_age,
m_cost_biopsy_nodiscount_age,
t_cost_biopsy_nodiscount_age) = outcomes(cost_biopsy_nodiscount_age)
cost_biopsy_discount_age = cost_biopsy_sc_discount_age + cost_biopsy_ns_discount_age
(s_cost_biopsy_discount_age,
m_cost_biopsy_discount_age,
t_cost_biopsy_discount_age) = outcomes(cost_biopsy_discount_age)
# Cost of staging in the screened arm
if year < 55:
cost_staging_sc_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_sc.T
* relative_cost_clinically_detected[:,year-45:].T).T
if year > 54:
cost_staging_screened_nodiscount_age = (cost_assessment
* psa_stage_screened_adv.T
* pca_incidence_screened.T).T
cost_staging_post_screening_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_post_screening.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_sc_nodiscount_age = (cost_staging_screened_nodiscount_age
+ cost_staging_post_screening_nodiscount_age)
(s_cost_staging_sc_nodiscount_age,
m_cost_staging_sc_nodiscount_age,
t_cost_staging_sc_nodiscount_age) = outcomes(cost_staging_sc_nodiscount_age)
cost_staging_sc_discount_age = cost_staging_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_sc_discount_age,
m_cost_staging_sc_discount_age,
t_cost_staging_sc_discount_age) = outcomes(cost_staging_sc_discount_age)
# Cost of staging in the non-screened arm
cost_staging_ns_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_ns.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_ns_nodiscount_age,
m_cost_staging_ns_nodiscount_age,
t_cost_staging_ns_nodiscount_age) = outcomes(cost_staging_ns_nodiscount_age)
cost_staging_ns_discount_age = cost_staging_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_ns_discount_age,
m_cost_staging_ns_discount_age,
t_cost_staging_ns_discount_age) = outcomes(cost_staging_ns_discount_age)
# Total costs of staging
########################
cost_staging_nodiscount_age = cost_staging_sc_nodiscount_age + cost_staging_ns_nodiscount_age
(s_cost_staging_nodiscount_age,
m_cost_staging_nodiscount_age,
t_cost_staging_nodiscount_age) = outcomes(cost_staging_nodiscount_age)
cost_staging_discount_age = cost_staging_sc_discount_age + cost_staging_ns_discount_age
(s_cost_staging_discount_age,
m_cost_staging_discount_age,
t_cost_staging_discount_age) = outcomes(cost_staging_discount_age)
# Cost of treatment in screened arm
(s_cost_tx_sc_nodiscount_age,
m_cost_tx_sc_nodiscount_age,
t_cost_tx_sc_nodiscount_age) = outcomes(costs_tx_sc)
    cost_tx_sc_discount_age = costs_tx_sc * discount_factor[:total_cycles]
    (s_cost_tx_sc_discount_age,
     m_cost_tx_sc_discount_age,
     t_cost_tx_sc_discount_age) = outcomes(cost_tx_sc_discount_age)
# Cost of treatment in non-screened arm
(s_cost_tx_ns_nodiscount_age,
m_cost_tx_ns_nodiscount_age,
t_cost_tx_ns_nodiscount_age) = outcomes(costs_tx_ns)
    cost_tx_ns_discount_age = costs_tx_ns * discount_factor[:total_cycles]
    (s_cost_tx_ns_discount_age,
     m_cost_tx_ns_discount_age,
     t_cost_tx_ns_discount_age) = outcomes(cost_tx_ns_discount_age)
# Total costs of treatment
##########################
cost_tx_nodiscount_age = costs_tx_sc + costs_tx_ns
(s_cost_tx_nodiscount_age,
m_cost_tx_nodiscount_age,
t_cost_tx_nodiscount_age) = outcomes(cost_tx_nodiscount_age)
cost_tx_discount_age = cost_tx_nodiscount_age * discount_factor[:total_cycles]
(s_cost_tx_discount_age,
m_cost_tx_discount_age,
t_cost_tx_discount_age) = outcomes(cost_tx_discount_age)
# Costs of palliation and death in screened arm
cost_eol_sc_nodiscount_age = (pca_death_costs * pca_death_sc.T).T
(s_cost_eol_sc_nodiscount_age,
m_cost_eol_sc_nodiscount_age,
t_cost_eol_sc_nodiscount_age) = outcomes(cost_eol_sc_nodiscount_age)
cost_eol_sc_discount_age = cost_eol_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_sc_discount_age,
m_cost_eol_sc_discount_age,
t_cost_eol_sc_discount_age) = outcomes(cost_eol_sc_discount_age)
# Costs of palliation and death in non-screened arm
cost_eol_ns_nodiscount_age = (pca_death_costs * pca_death_ns.T).T
(s_cost_eol_ns_nodiscount_age,
m_cost_eol_ns_nodiscount_age,
t_cost_eol_ns_nodiscount_age) = outcomes(cost_eol_ns_nodiscount_age)
cost_eol_ns_discount_age = cost_eol_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_ns_discount_age,
m_cost_eol_ns_discount_age,
t_cost_eol_ns_discount_age) = outcomes(cost_eol_ns_discount_age)
# Total costs of palliation and death
cost_eol_nodiscount_age = cost_eol_sc_nodiscount_age + cost_eol_ns_nodiscount_age
(s_cost_eol_nodiscount_age,
m_cost_eol_nodiscount_age,
t_cost_eol_nodiscount_age) = outcomes(cost_eol_nodiscount_age)
cost_eol_discount_age = cost_eol_sc_discount_age + cost_eol_ns_discount_age
(s_cost_eol_discount_age,
m_cost_eol_discount_age,
t_cost_eol_discount_age) = outcomes(cost_eol_discount_age)
# TOTAL COSTS AGE-BASED SCREENING
#################################
cost_nodiscount_age = (cost_psa_testing_nodiscount_age
+ cost_biopsy_nodiscount_age
+ cost_staging_nodiscount_age
+ cost_tx_nodiscount_age
+ cost_eol_nodiscount_age)
s_cost_nodiscount_age, m_cost_nodiscount_age, t_cost_nodiscount_age = outcomes(cost_nodiscount_age)
cost_discount_age = (cost_psa_testing_discount_age
+ cost_biopsy_discount_age
+ cost_staging_discount_age
+ cost_tx_discount_age
+ cost_eol_discount_age)
s_cost_discount_age, m_cost_discount_age, t_cost_discount_age = outcomes(cost_discount_age)
# Generate a mean dataframe
age_matrix = [age, m_cases_age, m_cases_sc_detected_age,
m_cases_post_screening_age, m_overdiagnosis_age, m_deaths_other_age, m_deaths_pca_age,
                  m_pca_alive_age, m_healthy_age, m_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_discount_age, m_lyrs_pca_discount_age, m_lyrs_discount_age,
m_qalys_healthy_discount_age, m_qalys_pca_discount_age, m_qalys_discount_age,
m_cost_psa_testing_discount_age, m_cost_biopsy_discount_age, m_cost_staging_discount_age,
m_cost_tx_discount_age, m_cost_eol_discount_age, m_cost_discount_age]
age_columns = ['age', 'pca_cases', 'screen-detected cases',
'post-screening cases', 'overdiagnosis', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy','lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
age_cohort = pd.DataFrame(age_matrix, index = age_columns).T
t_parameters_age = [year, t_cases_age, t_overdiagnosis_age,
t_deaths_pca_age, t_deaths_other_age,
t_lyrs_healthy_discount_age, t_lyrs_pca_discount_age,
t_lyrs_nodiscount_age, t_lyrs_discount_age, t_qalys_healthy_discount_age,
t_qalys_pca_discount_age, t_qalys_nodiscount_age, t_qalys_discount_age,
                        t_cost_psa_testing_nodiscount_age, t_cost_psa_testing_discount_age,
t_cost_biopsy_nodiscount_age, t_cost_biopsy_discount_age,
t_cost_staging_nodiscount_age, t_cost_staging_discount_age,
t_cost_tx_nodiscount_age, t_cost_tx_discount_age,
t_cost_eol_nodiscount_age, t_cost_eol_discount_age,
t_cost_nodiscount_age, t_cost_discount_age,
total_n_psa_tests_age, total_n_biopsies_age]
columns_age = ['cohort_age_at_start', 'pca_cases', 'overdiagnosis',
'pca_deaths', 'deaths_other_causes',
'lyrs_healthy_discounted', 'lyrs_pca_discounted',
'lyrs_undiscounted', 'lyrs_discounted','qalys_healthy_discounted',
'qalys_pca_discounted', 'qalys_undiscounted', 'qalys_discounted',
'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_treatment_undiscounted', 'cost_treatment_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted',
'costs_undiscounted', 'costs_discounted', 'n_psa_tests', 'n_biopsies']
outcomes_age_psa = pd.DataFrame(t_parameters_age, index = columns_age).T
s_qalys_discount_age_df = pd.DataFrame(s_qalys_discount_age)
s_cost_discount_age_df = pd.DataFrame(s_cost_discount_age)
parameters_age = [s_qalys_discount_age, s_cost_discount_age,
s_deaths_pca_age, s_overdiagnosis_age,
age_cohort, outcomes_age_psa]
for index, parameter in enumerate(parameter_list_age):
parameter = gen_list_outcomes(parameter_list_age[index], parameters_age[index])
#################################################
# Polygenic risk tailored screening from age 55 #
#################################################
# Yearly probability of PCa incidence
smoothed_pca_incidence_prs = psa_function(pca_incidence)
smoothed_pca_incidence_prs[:,10:25] = (smoothed_pca_incidence_prs[:,10:25].T * rr_incidence[year-45,:]).T
smoothed_pca_incidence_prs[:,25:35] = smoothed_pca_incidence_prs[:,25:35] * np.linspace(post_sc_incidence_drop,1,10)
smoothed_pca_incidence_prs = smoothed_pca_incidence_prs[:,year-45:]
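    # Assuming the incidence array is indexed from age 45 (consistent with the year-45 slicing used
    # throughout), columns 10:25 cover ages 55-69 (the screening window) and columns 25:35 cover
    # ages 70-79, over which the screening effect is tapered back to baseline.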
# Yearly probability of death from PCa - smoothed entry and exit
smoothed_pca_mortality_prs = psa_function(pca_death_baseline)
smoothed_pca_mortality_prs[:,10:15] = smoothed_pca_mortality_prs[:,10:15] * np.linspace(1,0.79,5)
smoothed_pca_mortality_prs[:,15:] = smoothed_pca_mortality_prs[:,15:] * rr_death_screening[:,15:]
smoothed_pca_mortality_prs = smoothed_pca_mortality_prs[:,year-45:]
# Probability of being screened
p_screened = np.array(uptake_prs * a_risk.loc[year,'p_above_threshold'])
p_ns = np.array((1-uptake_prs) * a_risk.loc[year,'p_above_threshold'])
p_nos = np.array(compliance * (1-a_risk.loc[year,'p_above_threshold']))
p_nos_screened = np.array((1-compliance) * (1-a_risk.loc[year,'p_above_threshold']))
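    # These four groups partition the cohort: p_screened + p_ns = p_above_threshold and
    # p_nos + p_nos_screened = 1 - p_above_threshold, so the four probabilities sum to 1.
    # Illustrative example only: with uptake_prs = 0.75, compliance = 0.9 and
    # p_above_threshold = 0.4, the split would be 0.30 / 0.10 / 0.54 / 0.06.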
if year < 55:
# Yearly probability of PCa incidence
p_pca_screened = tr_incidence
p_pca_ns = tr_incidence
p_pca_nos = tr_incidence
p_pca_nos_screened = tr_incidence
# Yearly probability of death from PCa
p_pca_death_screened = tr_pca_death_baseline
p_pca_death_ns = tr_pca_death_baseline
p_pca_death_nos = tr_pca_death_baseline
p_pca_death_nos_screened = tr_pca_death_baseline
# Proportion of cancers detected by screening at a localised / advanced stage
psa_stage_adv_sc = psa_function(stage_adv[year-45:])
psa_stage_adv_ns = psa_function(stage_adv[year-45:])
psa_stage_adv_nos_sc = psa_function(stage_adv[year-45:])
psa_stage_adv_nos = psa_function(stage_adv[year-45:])
psa_stage_local_sc = psa_function(stage_local[year-45:])
psa_stage_local_ns = psa_function(stage_local[year-45:])
psa_stage_local_nos_sc = psa_function(stage_local[year-45:])
psa_stage_local_nos = psa_function(stage_local[year-45:])
elif year > 54:
# Yearly probability of PCa incidence
p_pca_screened = smoothed_pca_incidence_prs * a_risk.loc[year, 'rr_high']
p_pca_ns = tr_incidence * a_risk.loc[year,'rr_high']
p_pca_nos = tr_incidence * a_risk.loc[year,'rr_low']
p_pca_nos_screened = smoothed_pca_incidence_prs * a_risk.loc[year,'rr_low']
# Yearly probability of death from PCa
p_pca_death_screened = smoothed_pca_mortality_prs * a_risk.loc[year,'rr_high']
p_pca_death_ns = tr_pca_death_baseline * a_risk.loc[year,'rr_high']
p_pca_death_nos = tr_pca_death_baseline * a_risk.loc[year,'rr_low']
p_pca_death_nos_screened = smoothed_pca_mortality_prs * a_risk.loc[year,'rr_low']
# Proportion of cancers detected by screening at a localised / advanced stage
stage_screened_adv_sc = (stage_adv
* rr_adv_screening
* a_risk.loc[year, 'rr_high'])
psa_stage_adv_sc = stage_screened_adv_sc[:,year-45:]
stage_clinical_adv_ns = stage_adv * a_risk.loc[year, 'rr_high']
psa_stage_adv_ns = psa_function(stage_clinical_adv_ns[year-45:])
stage_screened_adv_nos_sc = (stage_adv
* rr_adv_screening
* a_risk.loc[year, 'rr_low'])
psa_stage_adv_nos_sc = stage_screened_adv_nos_sc[:,year-45:]
stage_clinical_adv_nos = stage_adv * a_risk.loc[year, 'rr_low']
psa_stage_adv_nos = psa_function(stage_clinical_adv_nos[year-45:])
stage_screened_local_sc = 1-stage_screened_adv_sc
psa_stage_local_sc = stage_screened_local_sc[:,year-45:]
stage_clinical_local_ns = 1-stage_clinical_adv_ns
psa_stage_local_ns = psa_function(stage_clinical_local_ns[year-45:])
stage_screened_local_nos_sc = 1-stage_screened_adv_nos_sc
psa_stage_local_nos_sc = stage_screened_local_nos_sc[:, year-45:]
stage_clinical_local_nos = 1-stage_clinical_adv_nos
psa_stage_local_nos = psa_function(stage_clinical_local_nos[year-45:])
#####################
# Year 1 in the model
#####################
age = np.arange(year,90)
length_df = len(age)
length_screen = len(np.arange(year,70)) # number of screening years depending on age cohort starting
# Cohorts, numbers 'healthy', and incident cases
cohort_sc = np.array([np.repeat(pop[year], length_df)] * sims) * p_screened
cohort_ns = np.array([np.repeat(pop[year], length_df)] * sims) * p_ns
cohort_nos = np.array([np.repeat(pop[year], length_df)] * sims) * p_nos
cohort_nos_sc = np.array([np.repeat(pop[year], length_df)] * sims) * p_nos_screened
pca_alive_sc = np.array([np.zeros(length_df)] * sims)
pca_alive_ns = np.array([np.zeros(length_df)] * sims)
pca_alive_nos = np.array([np.zeros(length_df)] * sims)
pca_alive_nos_sc = np.array([np.zeros(length_df)] * sims)
healthy_sc = cohort_sc - pca_alive_sc
healthy_ns = cohort_ns - pca_alive_ns
healthy_nos = cohort_nos - pca_alive_nos
healthy_nos_sc = cohort_nos_sc - pca_alive_nos_sc
pca_incidence_sc = healthy_sc * p_pca_screened
pca_incidence_nos_sc = healthy_nos_sc * p_pca_nos_screened
if year > 54:
pca_incidence_screened = pca_incidence_sc.copy() # Screen-detected cancers
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims) # Post-screening cancers - 0 until model reaches age 70.
pca_incidence_nos_sc_screened = pca_incidence_nos_sc.copy() # Screen-detected cancers
pca_incidence_nos_sc_post_screening = np.array([np.zeros(length_df)] * sims) # Post-screening cancers - 0 until model reaches age 70.
elif year < 55:
# Zero as no screening in any of these cohorts
pca_incidence_screened = np.array([np.zeros(length_df)] * sims)
pca_incidence_post_screening = np.array([np.zeros(length_df)] * sims)
pca_incidence_nos_sc_screened = np.array([np.zeros(length_df)] * sims)
pca_incidence_nos_sc_post_screening = np.array([np.zeros(length_df)] * sims)
pca_incidence_ns = healthy_ns * p_pca_ns
pca_incidence_nos = healthy_nos * p_pca_nos
# Deaths
pca_death_sc = ((pca_alive_sc * p_pca_death_screened)
+ (healthy_sc * p_pca_death_screened))
pca_death_ns = ((pca_alive_ns * p_pca_death_ns)
+ (healthy_ns * p_pca_death_ns))
pca_death_nos = ((pca_alive_nos * p_pca_death_nos)
+ (healthy_nos * p_pca_death_nos))
pca_death_nos_sc = ((pca_alive_nos_sc * p_pca_death_nos_screened)
+ (healthy_nos_sc * p_pca_death_nos_screened))
pca_death_other_sc = ((pca_incidence_sc
+ pca_alive_sc
- pca_death_sc)
* tr_death_other_causes)
pca_death_other_ns = ((pca_incidence_ns
+ pca_alive_ns
- pca_death_ns)
* tr_death_other_causes)
pca_death_other_nos = ((pca_incidence_nos
+ pca_alive_nos
- pca_death_nos)
* tr_death_other_causes)
pca_death_other_nos_sc = ((pca_incidence_nos_sc
+ pca_alive_nos_sc
- pca_death_nos_sc)
* tr_death_other_causes)
healthy_death_other_sc = ((healthy_sc - pca_incidence_sc)
* tr_death_other_causes)
healthy_death_other_ns = ((healthy_ns - pca_incidence_ns)
* tr_death_other_causes)
healthy_death_other_nos = ((healthy_nos - pca_incidence_nos)
* tr_death_other_causes)
healthy_death_other_nos_sc = ((healthy_nos_sc - pca_incidence_nos_sc)
* tr_death_other_causes)
total_death_sc = (pca_death_sc
+ pca_death_other_sc
+ healthy_death_other_sc)
total_death_ns = (pca_death_ns
+ pca_death_other_ns
+ healthy_death_other_ns)
total_death_nos = (pca_death_nos
+ pca_death_other_nos
+ healthy_death_other_nos)
total_death_nos_sc = (pca_death_nos_sc
+ pca_death_other_nos_sc
+ healthy_death_other_nos_sc)
total_death = (total_death_sc
+ total_death_ns
+ total_death_nos
+ total_death_nos_sc)
# Prevalent cases & life-years
pca_prevalence_sc = (pca_incidence_sc
- pca_death_sc
- pca_death_other_sc)
pca_prevalence_ns = (pca_incidence_ns
- pca_death_ns
- pca_death_other_ns)
pca_prevalence_nos = (pca_incidence_nos
- pca_death_nos
- pca_death_other_nos)
pca_prevalence_nos_sc = (pca_incidence_nos_sc
- pca_death_nos_sc
- pca_death_other_nos_sc)
lyrs_pca_sc_nodiscount = pca_prevalence_sc * 0.5
lyrs_pca_ns_nodiscount = pca_prevalence_ns * 0.5
lyrs_pca_nos_nodiscount = pca_prevalence_nos * 0.5
lyrs_pca_nos_sc_nodiscount = pca_prevalence_nos_sc * 0.5
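    # In the first model year the prior-cycle prevalence is zero, so the half-cycle average
    # (0 + prevalence) / 2 reduces to prevalence * 0.5.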
# Costs
if year > 54:
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_screened = np.array([np.zeros(length_df)] * sims)
costs_tx_post_screening = np.array([np.zeros(length_df)] * sims)
costs_tx_screened[:,0] = ((pca_incidence_screened[:,0]
* psa_stage_local_sc[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,0]
* psa_stage_adv_sc[:,0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,0] = ((pca_incidence_post_screening[:,0]
* psa_stage_local_ns[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,0]
* psa_stage_adv_ns[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0]) # cost of post-screening cancers
costs_tx_sc[:,0] = (costs_tx_screened[:,0] + costs_tx_post_screening[:,0]) # total cost in screened arms
costs_tx_nos_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_nos_sc_screened = np.array([np.zeros(length_df)] * sims)
costs_tx_nos_sc_post_screening = np.array([np.zeros(length_df)] * sims)
costs_tx_nos_sc_screened[:,0] = ((pca_incidence_nos_sc_screened[:,0]
* psa_stage_local_nos_sc[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_screened[:,0]
* psa_stage_adv_nos_sc[:,0].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_nos_sc_post_screening[:,0] = ((pca_incidence_nos_sc_post_screening[:,0]
* psa_stage_local_nos[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_post_screening[:,0]
* psa_stage_adv_nos[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0]) # cost of post-screening cancers
costs_tx_nos_sc[:,0] = (costs_tx_nos_sc_screened[:,0] + costs_tx_nos_sc_post_screening[:,0]) # total cost in screened arms
elif year < 55:
costs_tx_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_sc[:,0] = ((pca_incidence_sc[:,0]
* psa_stage_local_sc[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,0]
* psa_stage_adv_sc[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_nos_sc = np.array([np.zeros(length_df)] * sims)
costs_tx_nos_sc[:,0] = ((pca_incidence_nos_sc[:,0]
* psa_stage_local_nos_sc[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc[:,0]
* psa_stage_adv_nos_sc[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_ns = np.array([np.zeros(length_df)] * sims)
costs_tx_ns[:,0] = ((pca_incidence_ns[:,0]
* psa_stage_local_ns[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,0]
* psa_stage_adv_ns[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
costs_tx_nos = np.array([np.zeros(length_df)] * sims)
costs_tx_nos[:,0] = ((pca_incidence_nos[:,0]
* psa_stage_local_nos[:,0].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos[:,0]
* psa_stage_adv_nos[:,0].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,0])
# Year 2 onwards
################
for i in range(1, total_cycles):
# Cohorts, numbers 'healthy', incident & prevalent cases
cohort_sc[:,i] = (cohort_sc[:,i-1] - total_death_sc[:,i-1])
cohort_ns[:,i] = (cohort_ns[:,i-1] - total_death_ns[:,i-1])
cohort_nos[:,i] = (cohort_nos[:,i-1] - total_death_nos[:,i-1])
cohort_nos_sc[:,i] = (cohort_nos_sc[:,i-1] - total_death_nos_sc[:,i-1])
pca_alive_sc[:,i] = (pca_alive_sc[:,i-1]
+ pca_incidence_sc[:,i-1]
- pca_death_sc[:,i-1]
- pca_death_other_sc[:,i-1])
pca_alive_ns[:,i] = (pca_alive_ns[:,i-1]
+ pca_incidence_ns[:,i-1]
- pca_death_ns[:,i-1]
- pca_death_other_ns[:,i-1])
pca_alive_nos[:,i] = (pca_alive_nos[:,i-1]
+ pca_incidence_nos[:,i-1]
- pca_death_nos[:,i-1]
- pca_death_other_nos[:,i-1])
pca_alive_nos_sc[:,i] = (pca_alive_nos_sc[:,i-1]
+ pca_incidence_nos_sc[:,i-1]
- pca_death_nos_sc[:,i-1]
- pca_death_other_nos_sc[:,i-1])
healthy_sc[:,i] = cohort_sc[:,i] - pca_alive_sc[:,i]
healthy_ns[:,i] = cohort_ns[:,i] - pca_alive_ns[:,i]
healthy_nos[:,i] = cohort_nos[:,i] - pca_alive_nos[:,i]
healthy_nos_sc[:,i] = cohort_nos_sc[:,i] - pca_alive_nos_sc[:,i]
pca_incidence_sc[:,i] = healthy_sc[:,i] * p_pca_screened[:,i]
pca_incidence_nos_sc[:,i] = healthy_nos_sc[:,i] * p_pca_nos_screened[:,i]
if year > 54:
if i < length_screen:
pca_incidence_screened[:,i] = pca_incidence_sc[:,i].copy()
pca_incidence_post_screening[:,i] = 0
pca_incidence_nos_sc_screened[:,i] = pca_incidence_nos_sc[:,i].copy()
pca_incidence_nos_sc_post_screening[:,i] = 0
else:
pca_incidence_screened[:,i] = 0
pca_incidence_post_screening[:,i] = pca_incidence_sc[:,i].copy()
pca_incidence_nos_sc_screened[:,i] = 0
pca_incidence_nos_sc_post_screening[:,i] = pca_incidence_nos_sc[:,i].copy()
elif year < 55:
pca_incidence_screened[:,i] = 0
pca_incidence_post_screening[:,i] = 0
pca_incidence_nos_sc_screened[:,i] = 0
pca_incidence_nos_sc_post_screening[:,i] = 0
pca_incidence_ns[:,i] = healthy_ns[:,i] * p_pca_ns[:,i]
pca_incidence_nos[:,i] = healthy_nos[:,i] * p_pca_nos[:,i]
# Deaths
pca_death_sc[:,i] = ((pca_alive_sc[:,i] * p_pca_death_screened[:,i])
+ (healthy_sc[:,i] * p_pca_death_screened[:,i]))
pca_death_ns[:,i] = ((pca_alive_ns[:,i] * p_pca_death_ns[:,i])
+ (healthy_ns[:,i] * p_pca_death_ns[:,i]))
pca_death_nos[:,i] = ((pca_alive_nos[:,i] * p_pca_death_nos[:,i])
+ (healthy_nos[:,i] * p_pca_death_nos[:,i]))
pca_death_nos_sc[:,i] = ((pca_alive_nos_sc[:,i] * p_pca_death_nos_screened[:,i])
+ (healthy_nos_sc[:,i] * p_pca_death_nos_screened[:,i]))
pca_death_other_sc[:,i] = ((pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i])
* tr_death_other_causes[:,i])
pca_death_other_ns[:,i] = ((pca_incidence_ns[:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i])
* tr_death_other_causes[:,i])
pca_death_other_nos[:,i] = ((pca_incidence_nos[:,i]
+ pca_alive_nos[:,i]
- pca_death_nos[:,i])
* tr_death_other_causes[:,i])
pca_death_other_nos_sc[:,i] = ((pca_incidence_nos_sc[:,i]
+ pca_alive_nos_sc[:,i]
- pca_death_nos_sc[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_sc[:,i] = ((healthy_sc[:,i] - pca_incidence_sc[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_ns[:,i] = ((healthy_ns[:,i] - pca_incidence_ns[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_nos[:,i] = ((healthy_nos[:,i] - pca_incidence_nos[:,i])
* tr_death_other_causes[:,i])
healthy_death_other_nos_sc[:,i] = ((healthy_nos_sc[:,i]
- pca_incidence_nos_sc[:,i])
* tr_death_other_causes[:,i])
total_death_sc[:,i] = (pca_death_sc[:,i]
+ pca_death_other_sc[:,i]
+ healthy_death_other_sc[:,i])
total_death_ns[:,i] = (pca_death_ns[:,i]
+ pca_death_other_ns[:,i]
+ healthy_death_other_ns[:,i])
total_death_nos[:,i] = (pca_death_nos[:,i]
+ pca_death_other_nos[:,i]
+ healthy_death_other_nos[:,i])
total_death_nos_sc[:,i] = (pca_death_nos_sc[:,i]
+ pca_death_other_nos_sc[:,i]
+ healthy_death_other_nos_sc[:,i])
total_death[:,i] = (total_death_sc[:,i]
+ total_death_ns[:,i]
+ total_death_nos[:,i]
+ total_death_nos_sc[:,i])
# Prevalent cases & life-years
pca_prevalence_sc[:,i] = (pca_incidence_sc[:,i]
+ pca_alive_sc[:,i]
- pca_death_sc[:,i]
- pca_death_other_sc[:,i])
pca_prevalence_ns[:,i] = (pca_incidence_ns[:,i]
+ pca_alive_ns[:,i]
- pca_death_ns[:,i]
- pca_death_other_ns[:,i])
pca_prevalence_nos[:,i] = (pca_incidence_nos[:,i]
+ pca_alive_nos[:,i]
- pca_death_nos[:,i]
- pca_death_other_nos[:,i])
pca_prevalence_nos_sc[:,i] = (pca_incidence_nos_sc[:,i]
+ pca_alive_nos_sc[:,i]
- pca_death_nos_sc[:,i]
- pca_death_other_nos_sc[:,i])
lyrs_pca_sc_nodiscount[:,i] = ((pca_prevalence_sc[:,i-1] + pca_prevalence_sc[:,i]) * 0.5) # This calculation is because of the life-table format of the model
lyrs_pca_ns_nodiscount[:,i] = ((pca_prevalence_ns[:,i-1] + pca_prevalence_ns[:,i]) * 0.5)
lyrs_pca_nos_nodiscount[:,i] = ((pca_prevalence_nos[:,i-1] + pca_prevalence_nos[:,i]) * 0.5)
lyrs_pca_nos_sc_nodiscount[:,i] = ((pca_prevalence_nos_sc[:,i-1] + pca_prevalence_nos_sc[:,i]) * 0.5)
# Costs
if year > 54:
costs_tx_screened[:,i] = ((pca_incidence_screened[:,i]
* psa_stage_local_sc[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_screened[:,i]
* psa_stage_adv_sc[:,i].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_post_screening[:,i] = ((pca_incidence_post_screening[:,i]
* psa_stage_local_ns[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_post_screening[:,i]
* psa_stage_adv_ns[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i]) # cost of post-screening cancers
costs_tx_sc[:,i] = (costs_tx_screened[:,i] + costs_tx_post_screening[:,i]) # total cost in screened arms
costs_tx_nos_sc_screened[:,i] = ((pca_incidence_nos_sc_screened[:,i]
* psa_stage_local_nos_sc[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_screened[:,i]
* psa_stage_adv_nos_sc[:,i].T
* tx_costs_adv.T).sum(axis=0)) # cost of screen-detected cancers
costs_tx_nos_sc_post_screening[:,i] = ((pca_incidence_nos_sc_post_screening[:,i]
* psa_stage_local_nos[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc_post_screening[:,i]
* psa_stage_adv_nos[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i]) # cost of post-screening cancers
costs_tx_nos_sc[:,i] = (costs_tx_nos_sc_screened[:,i] + costs_tx_nos_sc_post_screening[:,i]) # total cost in screened arms
elif year < 55:
costs_tx_sc[:,i] = ((pca_incidence_sc[:,i]
* psa_stage_local_sc[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_sc[:,i]
* psa_stage_adv_sc[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_nos_sc[:,i] = ((pca_incidence_nos_sc[:,i]
* psa_stage_local_nos_sc[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos_sc[:,i]
* psa_stage_adv_nos_sc[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_ns[:,i] = ((pca_incidence_ns[:,i]
* psa_stage_local_ns[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_ns[:,i]
* psa_stage_adv_ns[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
costs_tx_nos[:,i] = ((pca_incidence_nos[:,i]
* psa_stage_local_nos[:,i].T
* tx_costs_local.T).sum(axis=0)
+ (pca_incidence_nos[:,i]
* psa_stage_adv_nos[:,i].T
* tx_costs_adv.T).sum(axis=0)
* relative_cost_clinically_detected[:,i])
############
# Outcomes #
############
# INDEX:
    # s_ = sim (the sum across all years of the model for each simulation, i.e. one total value per simulation)
# m_ = mean (this is the mean across the simulations i.e. one value for each year of the model)
# t_ = total
# nodiscount = not discounted
# discount = discounted
# _prs = outcomes for the polygenic risk-tailored screening cohort
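    # Arm suffixes (inferred from the group probabilities defined above):
    #   _sc     = above risk threshold, offered screening and attends
    #   _ns     = above risk threshold, offered screening but does not attend
    #   _nos    = below risk threshold, not offered screening and not screened
    #   _nos_sc = below risk threshold, not offered screening but screened anyway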
# Incident cases (screened arms)
s_cases_sc_prs, m_cases_sc_prs, t_cases_sc_prs = outcomes(pca_incidence_sc)
s_cases_nos_sc_prs, m_cases_nos_sc_prs, t_cases_nos_sc_prs = outcomes(pca_incidence_nos_sc)
# Screen-detected cancers
s_cases_sc_detected_prs, m_cases_sc_detected_prs, t_cases_sc_detected_prs = outcomes(pca_incidence_screened)
s_cases_nos_sc_detected_prs, m_cases_nos_sc_detected_prs, t_cases_nos_sc_detected_prs = outcomes(pca_incidence_nos_sc_screened)
# Cancers in the post-screening phase (amongst those who received screening)
s_cases_post_screening_prs, m_cases_post_screening_prs, t_cases_post_screening_prs = outcomes(pca_incidence_post_screening)
s_cases_nos_sc_post_screening_prs, m_cases_nos_sc_post_screening_prs, t_cases_nos_sc_post_screening_prs = outcomes(pca_incidence_nos_sc_post_screening)
# Incident cases (non-screened arms)
s_cases_ns_prs, m_cases_ns_prs, t_cases_ns_prs = outcomes(pca_incidence_ns)
s_cases_nos_prs, m_cases_nos_prs, t_cases_nos_prs = outcomes(pca_incidence_nos)
# Incident cases (total)
########################
s_cases_prs = (s_cases_sc_prs
+ s_cases_ns_prs
+ s_cases_nos_prs
+ s_cases_nos_sc_prs)
m_cases_prs = (m_cases_sc_prs
+ m_cases_ns_prs
+ m_cases_nos_prs
+ m_cases_nos_sc_prs)
t_cases_prs = (t_cases_sc_prs
+ t_cases_ns_prs
+ t_cases_nos_prs
+ t_cases_nos_sc_prs)
# PCa alive
s_pca_alive_prs, m_pca_alive_prs, t_pca_alive_prs = outcomes((pca_alive_sc
+ pca_alive_ns
+ pca_alive_nos
+ pca_alive_nos_sc))
# Healthy
s_healthy_prs, m_healthy_prs, t_healthy_prs = outcomes((healthy_sc
+ healthy_ns
+ healthy_nos
+ healthy_nos_sc))
# Overdiagnosed cases
overdiagnosis_prs = pca_incidence_screened * p_overdiagnosis_psa.T[:,year-45:]
(s_overdiagnosis_prs,
m_overdiagnosis_prs,
t_overdiagnosis_prs) = outcomes(overdiagnosis_prs)
    # Deaths from other causes (screened arms)
deaths_sc_other_prs = pca_death_other_sc + healthy_death_other_sc
(s_deaths_sc_other_prs,
m_deaths_sc_other_prs,
t_deaths_sc_other_prs) = outcomes(deaths_sc_other_prs)
deaths_nos_sc_other_prs = pca_death_other_nos_sc + healthy_death_other_nos_sc
    (s_deaths_nos_sc_other_prs,
     m_deaths_nos_sc_other_prs,
     t_deaths_nos_sc_other_prs) = outcomes(deaths_nos_sc_other_prs)
# Deaths from other causes (non-screened arms)
deaths_ns_other_prs = pca_death_other_ns + healthy_death_other_ns
(s_deaths_ns_other_prs,
m_deaths_ns_other_prs,
t_deaths_ns_other_prs) = outcomes(deaths_ns_other_prs)
deaths_nos_other_prs = pca_death_other_nos + healthy_death_other_nos
(s_deaths_nos_other_prs,
m_deaths_nos_other_prs,
t_deaths_nos_other_prs) = outcomes(deaths_nos_other_prs)
# Total deaths from other causes
################################
deaths_other_prs = (deaths_sc_other_prs
+ deaths_ns_other_prs
+ deaths_nos_other_prs
+ deaths_nos_sc_other_prs)
s_deaths_other_prs, m_deaths_other_prs, t_deaths_other_prs = outcomes(deaths_other_prs)
    # Deaths from prostate cancer (screened arms)
s_deaths_sc_pca_prs, m_deaths_sc_pca_prs, t_deaths_sc_pca_prs = outcomes(pca_death_sc)
(s_deaths_nos_sc_pca_prs,
m_deaths_nos_sc_pca_prs,
t_deaths_nos_sc_pca_prs) = outcomes(pca_death_nos_sc)
    # Deaths from prostate cancer (non-screened arms)
s_deaths_ns_pca_prs, m_deaths_ns_pca_prs, t_deaths_ns_pca_prs = outcomes(pca_death_ns)
s_deaths_nos_pca_prs, m_deaths_nos_pca_prs, t_deaths_nos_pca_prs = outcomes(pca_death_nos)
    # Deaths from prostate cancer (total)
####################################
deaths_pca_prs = (pca_death_sc
+ pca_death_ns
+ pca_death_nos
+ pca_death_nos_sc)
s_deaths_pca_prs, m_deaths_pca_prs, t_deaths_pca_prs = outcomes(deaths_pca_prs)
# Healthy life-years (screened arm)
lyrs_healthy_sc_nodiscount_prs = (healthy_sc
- (0.5 * (healthy_death_other_sc + pca_incidence_sc)))
lyrs_healthy_sc_discount_prs = lyrs_healthy_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_lyrs_healthy_sc_discount_prs,
m_lyrs_healthy_sc_discount_prs,
t_lyrs_healthy_sc_discount_prs) = outcomes(lyrs_healthy_sc_discount_prs)
lyrs_healthy_nos_sc_nodiscount_prs = (healthy_nos_sc
- (0.5 * (healthy_death_other_nos_sc + pca_incidence_nos_sc)))
lyrs_healthy_nos_sc_discount_prs = lyrs_healthy_nos_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_lyrs_healthy_nos_sc_discount_prs,
m_lyrs_healthy_nos_sc_discount_prs,
t_lyrs_healthy_nos_sc_discount_prs) = outcomes(lyrs_healthy_nos_sc_discount_prs)
# Healthy life-years (non-screened arm)
lyrs_healthy_ns_nodiscount_prs = (healthy_ns -
(0.5 * (healthy_death_other_ns + pca_incidence_ns)))
lyrs_healthy_ns_discount_prs = lyrs_healthy_ns_nodiscount_prs * discount_factor[:total_cycles]
(s_lyrs_healthy_ns_discount_prs,
m_lyrs_healthy_ns_discount_prs,
t_lyrs_healthy_ns_discount_prs) = outcomes(lyrs_healthy_ns_discount_prs)
lyrs_healthy_nos_nodiscount_prs = (healthy_nos
- (0.5 * (healthy_death_other_nos + pca_incidence_nos)))
lyrs_healthy_nos_discount_prs = lyrs_healthy_nos_nodiscount_prs * discount_factor[:total_cycles]
(s_lyrs_healthy_nos_discount_prs,
m_lyrs_healthy_nos_discount_prs,
t_lyrs_healthy_nos_discount_prs) = outcomes(lyrs_healthy_nos_discount_prs)
# Total healthy life-years
lyrs_healthy_nodiscount_prs = (lyrs_healthy_sc_nodiscount_prs
+ lyrs_healthy_ns_nodiscount_prs
+ lyrs_healthy_nos_nodiscount_prs
+ lyrs_healthy_nos_sc_nodiscount_prs)
(s_lyrs_healthy_nodiscount_prs,
m_lyrs_healthy_nodiscount_prs,
t_lyrs_healthy_nodiscount_prs) = outcomes(lyrs_healthy_nodiscount_prs)
lyrs_healthy_discount_prs = (lyrs_healthy_sc_discount_prs
+ lyrs_healthy_ns_discount_prs
+ lyrs_healthy_nos_discount_prs
+ lyrs_healthy_nos_sc_discount_prs)
(s_lyrs_healthy_discount_prs,
m_lyrs_healthy_discount_prs,
t_lyrs_healthy_discount_prs) = outcomes(lyrs_healthy_discount_prs)
# Life-years with prostate cancer in screened arms
lyrs_pca_sc_discount = lyrs_pca_sc_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_sc_discount_prs,
m_lyrs_pca_sc_discount_prs,
t_lyrs_pca_sc_discount_prs) = outcomes(lyrs_pca_sc_discount)
lyrs_pca_nos_sc_discount = lyrs_pca_nos_sc_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_nos_sc_discount_prs,
m_lyrs_pca_nos_sc_discount_prs,
t_lyrs_pca_nos_sc_discount_prs) = outcomes(lyrs_pca_nos_sc_discount)
# Life-years with prostate cancer in non-screened arms
lyrs_pca_ns_discount = lyrs_pca_ns_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_ns_discount_prs,
m_lyrs_pca_ns_discount_prs,
t_lyrs_pca_ns_discount_prs) = outcomes(lyrs_pca_ns_discount)
lyrs_pca_nos_discount = lyrs_pca_nos_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_nos_discount_prs,
m_lyrs_pca_nos_discount_prs,
t_lyrs_pca_nos_discount_prs) = outcomes(lyrs_pca_nos_discount)
# Life-years with prostate cancer in both arms
lyrs_pca_nodiscount_prs = (lyrs_pca_sc_nodiscount
+ lyrs_pca_ns_nodiscount
+ lyrs_pca_nos_nodiscount
+ lyrs_pca_nos_sc_nodiscount)
lyrs_pca_discount_prs = (lyrs_pca_sc_discount
+ lyrs_pca_ns_discount
+ lyrs_pca_nos_discount
+ lyrs_pca_nos_sc_discount)
(s_lyrs_pca_discount_prs,
m_lyrs_pca_discount_prs,
t_lyrs_pca_discount_prs) = outcomes(lyrs_pca_discount_prs)
# Total Life-years
##################
lyrs_nodiscount_prs = lyrs_healthy_nodiscount_prs + lyrs_pca_nodiscount_prs
(s_lyrs_nodiscount_prs,
m_lyrs_nodiscount_prs,
t_lyrs_nodiscount_prs) = outcomes(lyrs_nodiscount_prs)
lyrs_discount_prs = lyrs_healthy_discount_prs + lyrs_pca_discount_prs
(s_lyrs_discount_prs,
m_lyrs_discount_prs,
t_lyrs_discount_prs) = outcomes(lyrs_discount_prs)
# QALYs (healthy life) - screened arms
qalys_healthy_sc_nodiscount_prs = lyrs_healthy_sc_nodiscount_prs * utility_background_psa[:,year-45:]
qalys_healthy_sc_discount_prs = lyrs_healthy_sc_discount_prs * utility_background_psa[:,year-45:]
(s_qalys_healthy_sc_discount_prs,
m_qalys_healthy_sc_discount_prs,
t_qalys_healthy_sc_discount_prs) = outcomes(qalys_healthy_sc_discount_prs)
qalys_healthy_nos_sc_nodiscount_prs = lyrs_healthy_nos_sc_nodiscount_prs * utility_background_psa[:,year-45:]
qalys_healthy_nos_sc_discount_prs = lyrs_healthy_nos_sc_discount_prs * utility_background_psa[:,year-45:]
(s_qalys_healthy_nos_sc_discount_prs,
m_qalys_healthy_nos_sc_discount_prs,
t_qalys_healthy_nos_sc_discount_prs) = outcomes(qalys_healthy_nos_sc_discount_prs)
# QALYs (healthy life) - non-screened arms
qalys_healthy_ns_nodiscount_prs = lyrs_healthy_ns_nodiscount_prs * utility_background_psa[:,year-45:]
qalys_healthy_ns_discount_prs = lyrs_healthy_ns_discount_prs * utility_background_psa[:,year-45:]
(s_qalys_healthy_ns_discount_prs,
m_qalys_healthy_ns_discount_prs,
t_qalys_healthy_ns_discount_prs) = outcomes(qalys_healthy_ns_discount_prs)
qalys_healthy_nos_nodiscount_prs = lyrs_healthy_nos_nodiscount_prs * utility_background_psa[:,year-45:]
qalys_healthy_nos_discount_prs = lyrs_healthy_nos_discount_prs * utility_background_psa[:,year-45:]
(s_qalys_healthy_nos_discount_prs,
m_qalys_healthy_nos_discount_prs,
t_qalys_healthy_nos_discount_prs) = outcomes(qalys_healthy_nos_discount_prs)
# Total QALYs (healthy life)
qalys_healthy_nodiscount_prs = lyrs_healthy_nodiscount_prs * utility_background_psa[:,year-45:]
qalys_healthy_discount_prs = lyrs_healthy_discount_prs * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_prs,
m_qalys_healthy_discount_prs,
t_qalys_healthy_discount_prs) = outcomes(qalys_healthy_discount_prs)
# QALYS with prostate cancer - screened arms
qalys_pca_sc_nodiscount_prs = lyrs_pca_sc_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_sc_discount_prs = lyrs_pca_sc_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_sc_discount_prs,
m_qalys_pca_sc_discount_prs,
t_qalys_pca_sc_discount_prs) = outcomes(qalys_pca_sc_discount_prs)
qalys_pca_nos_sc_nodiscount_prs = lyrs_pca_nos_sc_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_nos_sc_discount_prs = lyrs_pca_nos_sc_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_nos_sc_discount_prs,
m_qalys_pca_nos_sc_discount_prs,
t_qalys_pca_nos_sc_discount_prs) = outcomes(qalys_pca_nos_sc_discount_prs)
# QALYS with prostate cancer - non-screened arms
qalys_pca_ns_nodiscount_prs = lyrs_pca_ns_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_ns_discount_prs = lyrs_pca_ns_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_ns_discount_prs,
m_qalys_pca_ns_discount_prs,
t_qalys_pca_ns_discount_prs) = outcomes(qalys_pca_ns_discount_prs)
qalys_pca_nos_nodiscount_prs = lyrs_pca_nos_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_nos_discount_prs = lyrs_pca_nos_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_nos_discount_prs,
m_qalys_pca_nos_discount_prs,
t_qalys_pca_nos_discount_prs) = outcomes(qalys_pca_nos_discount_prs)
# Total QALYS with prostate cancer
qalys_pca_nodiscount_prs = lyrs_pca_nodiscount_prs * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_prs = lyrs_pca_discount_prs * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_prs,
m_qalys_pca_discount_prs,
t_qalys_pca_discount_prs) = outcomes(qalys_pca_discount_prs)
# Total QALYs
#############
qalys_nodiscount_prs = qalys_healthy_nodiscount_prs + qalys_pca_nodiscount_prs
(s_qalys_nodiscount_prs,
m_qalys_nodiscount_prs,
t_qalys_nodiscount_prs) = outcomes(qalys_nodiscount_prs)
qalys_discount_prs = qalys_healthy_discount_prs + qalys_pca_discount_prs
(s_qalys_discount_prs,
m_qalys_discount_prs,
t_qalys_discount_prs) = outcomes(qalys_discount_prs)
# Costs of risk-stratification
    cost_screening_prs = cost_prs * uptake_prs * pop[year] # risk-stratification costs are not discounted as they are incurred in year 1 of the model
# Costs of PSA testing in non-screened arms
n_psa_tests_ns_prs = (((pca_incidence_ns / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
cost_psa_testing_ns_nodiscount_prs = (n_psa_tests_ns_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
(s_cost_psa_testing_ns_nodiscount_prs,
m_cost_psa_testing_ns_nodiscount_prs,
t_cost_psa_testing_ns_nodiscount_prs) = outcomes(cost_psa_testing_ns_nodiscount_prs)
cost_psa_testing_ns_discount_prs = cost_psa_testing_ns_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_psa_testing_ns_discount_prs,
m_cost_psa_testing_ns_discount_prs,
t_cost_psa_testing_ns_discount_prs) = outcomes(cost_psa_testing_ns_discount_prs)
n_psa_tests_nos_prs = (((pca_incidence_nos / p_suspected_ns[:,year-45:])
+ ((pca_incidence_nos * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
cost_psa_testing_nos_nodiscount_prs = (n_psa_tests_nos_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
(s_cost_psa_testing_nos_nodiscount_prs,
m_cost_psa_testing_nos_nodiscount_prs,
t_cost_psa_testing_nos_nodiscount_prs) = outcomes(cost_psa_testing_nos_nodiscount_prs)
cost_psa_testing_nos_discount_prs = cost_psa_testing_nos_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_psa_testing_nos_discount_prs,
m_cost_psa_testing_nos_discount_prs,
t_cost_psa_testing_nos_discount_prs) = outcomes(cost_psa_testing_nos_discount_prs)
# Costs of PSA testing in screened arms
if year > 54:
# Get the screened years
lyrs_healthy_screened_nodiscount_prs = np.array([np.zeros(length_df)] * sims)
lyrs_healthy_screened_nodiscount_prs[:,:length_screen] = lyrs_healthy_sc_nodiscount_prs[:,:length_screen].copy()
lyrs_healthy_screened_nodiscount_prs[:,length_screen:] = 0
# Population-level PSA testing during screening phase
n_psa_tests_screened_prs = lyrs_healthy_screened_nodiscount_prs * uptake_psa / 4
# Assuming all cancers are clinically detected in the post-screening phase
n_psa_tests_post_screening_prs = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
+ ((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
n_psa_tests_sc_prs = (n_psa_tests_screened_prs + n_psa_tests_post_screening_prs)
cost_psa_testing_sc_nodiscount_prs = ((n_psa_tests_screened_prs * cost_psa[:,year-45:])
+ (n_psa_tests_post_screening_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:]))
# PSA tests in the not offered screening but screened anyway group
# Get the screened years
lyrs_healthy_nos_sc_screened_nodiscount_prs = np.array([np.zeros(length_df)] * sims)
lyrs_healthy_nos_sc_screened_nodiscount_prs[:,:length_screen] = lyrs_healthy_nos_sc_nodiscount_prs[:,:length_screen].copy()
lyrs_healthy_nos_sc_screened_nodiscount_prs[:,length_screen:] = 0
# Population-level PSA testing during screening phase
n_psa_tests_nos_sc_screened_prs = lyrs_healthy_nos_sc_screened_nodiscount_prs * uptake_psa / 4
# Assuming all cancers are clinically detected in the post-screening phase
n_psa_tests_nos_sc_post_screening_prs = (((pca_incidence_nos_sc_post_screening / p_suspected_ns[:,year-45:])
+ ((pca_incidence_nos_sc_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
n_psa_tests_nos_sc_prs = (n_psa_tests_nos_sc_screened_prs
+ n_psa_tests_nos_sc_post_screening_prs)
cost_psa_testing_nos_sc_nodiscount_prs = ((n_psa_tests_nos_sc_screened_prs * cost_psa[:,year-45:])
+ (n_psa_tests_nos_sc_post_screening_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:]))
elif year < 55:
n_psa_tests_sc_prs = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
+ ((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
        n_psa_tests_nos_sc_prs = (((pca_incidence_nos_sc / p_suspected_ns[:,year-45:])
                                   + ((pca_incidence_nos_sc * (1-uptake_biopsy[year-45:]))
                                   / p_suspected_refuse_biopsy_ns[:,year-45:]))
                                  * uptake_psa
                                  * n_psa_tests[:,year-45:])
cost_psa_testing_sc_nodiscount_prs = (n_psa_tests_sc_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
cost_psa_testing_nos_sc_nodiscount_prs = (n_psa_tests_nos_sc_prs
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
(s_cost_psa_testing_sc_nodiscount_prs,
m_cost_psa_testing_sc_nodiscount_prs,
t_cost_psa_testing_sc_nodiscount_prs) = outcomes(cost_psa_testing_sc_nodiscount_prs)
cost_psa_testing_sc_discount_prs = cost_psa_testing_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_psa_testing_sc_discount_prs,
m_cost_psa_testing_sc_discount_prs,
t_cost_psa_testing_sc_discount_prs) = outcomes(cost_psa_testing_sc_discount_prs)
(s_cost_psa_testing_nos_sc_nodiscount_prs,
m_cost_psa_testing_nos_sc_nodiscount_prs,
t_cost_psa_testing_nos_sc_nodiscount_prs) = outcomes(cost_psa_testing_nos_sc_nodiscount_prs)
cost_psa_testing_nos_sc_discount_prs = (cost_psa_testing_nos_sc_nodiscount_prs
* discount_factor[:total_cycles])
(s_cost_psa_testing_nos_sc_discount_prs,
m_cost_psa_testing_nos_sc_discount_prs,
t_cost_psa_testing_nos_sc_discount_prs) = outcomes(cost_psa_testing_nos_sc_discount_prs)
# Total costs of PSA testing
############################
n_psa_tests_prs = (n_psa_tests_sc_prs
+ n_psa_tests_ns_prs
+ n_psa_tests_nos_prs
+ n_psa_tests_nos_sc_prs)
(s_n_psa_tests_prs,
m_n_psa_tests_prs,
total_n_psa_tests_prs) = outcomes(n_psa_tests_prs)
cost_psa_testing_nodiscount_prs = (cost_psa_testing_sc_nodiscount_prs
+ cost_psa_testing_ns_nodiscount_prs
+ cost_psa_testing_nos_nodiscount_prs
+ cost_psa_testing_nos_sc_nodiscount_prs)
(s_cost_psa_testing_nodiscount_prs,
m_cost_psa_testing_nodiscount_prs,
t_cost_psa_testing_nodiscount_prs) = outcomes(cost_psa_testing_nodiscount_prs)
cost_psa_testing_discount_prs = (cost_psa_testing_sc_discount_prs
+ cost_psa_testing_ns_discount_prs
+ cost_psa_testing_nos_discount_prs
+ cost_psa_testing_nos_sc_discount_prs)
(s_cost_psa_testing_discount_prs,
m_cost_psa_testing_discount_prs,
t_cost_psa_testing_discount_prs) = outcomes(cost_psa_testing_discount_prs)
# Costs of biopsy - screened arms
if year > 54:
# Screen-detected cancers
n_biopsies_screened_prs = pca_incidence_screened / p_suspected[:,year-45:]
cost_biopsy_screened_nodiscount_prs = (((pca_incidence_screened / p_suspected[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_screened * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy[:,year-45:])
* cost_refuse_biopsy[:,year-45:]))
# Assuming all cancers are clinically detected in the post-screening phase
n_biopsies_post_screening_prs = pca_incidence_post_screening / p_suspected_ns[:,year-45:]
cost_biopsies_post_screening_nodiscount_prs = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
n_biopsies_sc_prs = (n_biopsies_screened_prs + n_biopsies_post_screening_prs)
# Total cost of biopsies
cost_biopsy_sc_nodiscount_prs = (cost_biopsy_screened_nodiscount_prs
+ cost_biopsies_post_screening_nodiscount_prs)
n_biopsies_nos_sc_screened_prs = pca_incidence_nos_sc_screened / p_suspected[:,year-45:]
cost_biopsy_nos_sc_screened_nodiscount_prs = (((pca_incidence_nos_sc_screened / p_suspected[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_nos_sc_screened * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy[:,year-45:])
* cost_refuse_biopsy[:,year-45:]))
# Assuming all cancers are clinically detected in the post-screening phase
n_biopsies_nos_sc_post_screening_prs = pca_incidence_nos_sc_post_screening / p_suspected_ns[:,year-45:]
cost_biopsies_nos_sc_post_screening_nodiscount_prs = (((pca_incidence_nos_sc_post_screening / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_nos_sc_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
# Total biopsies
n_biopsies_nos_sc_prs = (n_biopsies_nos_sc_screened_prs
+ n_biopsies_nos_sc_post_screening_prs)
# Total cost of biopsies
cost_biopsy_nos_sc_nodiscount_prs = (cost_biopsy_nos_sc_screened_nodiscount_prs
+ cost_biopsies_nos_sc_post_screening_nodiscount_prs)
elif year < 55:
n_biopsies_sc_prs = pca_incidence_sc / p_suspected_ns[:,year-45:]
cost_biopsy_sc_nodiscount_prs = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
n_biopsies_nos_sc_prs = pca_incidence_nos_sc / p_suspected_ns[:,year-45:]
cost_biopsy_nos_sc_nodiscount_prs = (((pca_incidence_nos_sc / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_nos_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_sc_nodiscount_prs,
m_cost_biopsy_sc_nodiscount_prs,
t_cost_biopsy_sc_nodiscount_prs) = outcomes(cost_biopsy_sc_nodiscount_prs)
cost_biopsy_sc_discount_prs = cost_biopsy_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_biopsy_sc_discount_prs,
m_cost_biopsy_sc_discount_prs,
t_cost_biopsy_sc_discount_prs) = outcomes(cost_biopsy_sc_discount_prs)
(s_cost_biopsy_nos_sc_nodiscount_prs,
m_cost_biopsy_nos_sc_nodiscount_prs,
t_cost_biopsy_nos_sc_nodiscount_prs) = outcomes(cost_biopsy_nos_sc_nodiscount_prs)
cost_biopsy_nos_sc_discount_prs = cost_biopsy_nos_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_biopsy_nos_sc_discount_prs,
m_cost_biopsy_nos_sc_discount_prs,
t_cost_biopsy_nos_sc_discount_prs) = outcomes(cost_biopsy_nos_sc_discount_prs)
# Costs of biopsy - non-screened arms
n_biopsies_ns_prs = pca_incidence_ns / p_suspected_ns[:,year-45:]
cost_biopsy_ns_nodiscount_prs = (((pca_incidence_ns / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_ns_nodiscount_prs,
m_cost_biopsy_ns_nodiscount_prs,
t_cost_biopsy_ns_nodiscount_prs) = outcomes(cost_biopsy_ns_nodiscount_prs)
cost_biopsy_ns_discount_prs = cost_biopsy_ns_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_biopsy_ns_discount_prs,
m_cost_biopsy_ns_discount_prs,
t_cost_biopsy_ns_discount_prs) = outcomes(cost_biopsy_ns_discount_prs)
n_biopsies_nos_prs = pca_incidence_nos / p_suspected_ns[:,year-45:]
cost_biopsy_nos_nodiscount_prs = (((pca_incidence_nos / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_nos * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_nos_nodiscount_prs,
m_cost_biopsy_nos_nodiscount_prs,
t_cost_biopsy_nos_nodiscount_prs) = outcomes(cost_biopsy_nos_nodiscount_prs)
cost_biopsy_nos_discount_prs = cost_biopsy_nos_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_biopsy_nos_discount_prs,
m_cost_biopsy_nos_discount_prs,
t_cost_biopsy_nos_discount_prs) = outcomes(cost_biopsy_nos_discount_prs)
# Total costs of biopsy
#######################
n_biopsies_prs = (n_biopsies_sc_prs
+ n_biopsies_ns_prs
+ n_biopsies_nos_prs
+ n_biopsies_nos_sc_prs)
(s_n_biopsies_prs,
m_n_biopsies_prs,
total_n_biopsies_prs) = outcomes(n_biopsies_prs)
cost_biopsy_nodiscount_prs = (cost_biopsy_sc_nodiscount_prs
+ cost_biopsy_ns_nodiscount_prs
+ cost_biopsy_nos_nodiscount_prs
+ cost_biopsy_nos_sc_nodiscount_prs)
(s_cost_biopsy_nodiscount_prs,
m_cost_biopsy_nodiscount_prs,
t_cost_biopsy_nodiscount_prs) = outcomes(cost_biopsy_nodiscount_prs)
cost_biopsy_discount_prs = (cost_biopsy_sc_discount_prs
+ cost_biopsy_ns_discount_prs
+ cost_biopsy_nos_discount_prs
+ cost_biopsy_nos_sc_discount_prs)
(s_cost_biopsy_discount_prs,
m_cost_biopsy_discount_prs,
t_cost_biopsy_discount_prs) = outcomes(cost_biopsy_discount_prs)
# Cost of staging in the screened arms
if year > 54:
cost_staging_screened_nodiscount_prs = (cost_assessment
* psa_stage_adv_sc.T
* pca_incidence_screened.T).T
cost_staging_post_screening_nodiscount_prs = (cost_assessment
* psa_stage_adv_ns.T
* pca_incidence_post_screening.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_sc_nodiscount_prs = (cost_staging_screened_nodiscount_prs
+ cost_staging_post_screening_nodiscount_prs)
cost_staging_nos_sc_screened_nodiscount_prs = (cost_assessment
* psa_stage_adv_nos_sc.T
* pca_incidence_nos_sc_screened.T).T
cost_staging_nos_sc_post_screening_nodiscount_prs = (cost_assessment
* psa_stage_adv_nos.T
* pca_incidence_nos_sc_post_screening.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_nos_sc_nodiscount_prs = (cost_staging_nos_sc_screened_nodiscount_prs
+ cost_staging_nos_sc_post_screening_nodiscount_prs)
if year < 55:
cost_staging_sc_nodiscount_prs = (cost_assessment
* psa_stage_adv_sc.T
* pca_incidence_sc.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_nos_sc_nodiscount_prs = (cost_assessment
* psa_stage_adv_nos_sc.T
* pca_incidence_nos_sc.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_sc_nodiscount_prs,
m_cost_staging_sc_nodiscount_prs,
t_cost_staging_sc_nodiscount_prs) = outcomes(cost_staging_sc_nodiscount_prs)
cost_staging_sc_discount_prs = cost_staging_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_staging_sc_discount_prs,
m_cost_staging_sc_discount_prs,
t_cost_staging_sc_discount_prs) = outcomes(cost_staging_sc_discount_prs)
(s_cost_staging_nos_sc_nodiscount_prs,
m_cost_staging_nos_sc_nodiscount_prs,
t_cost_staging_nos_sc_nodiscount_prs) = outcomes(cost_staging_nos_sc_nodiscount_prs)
cost_staging_nos_sc_discount_prs = cost_staging_nos_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_staging_nos_sc_discount_prs,
m_cost_staging_nos_sc_discount_prs,
t_cost_staging_nos_sc_discount_prs) = outcomes(cost_staging_nos_sc_discount_prs)
# Cost of staging in the non-screened arms
cost_staging_ns_nodiscount_prs = (cost_assessment
* psa_stage_adv_ns.T
* pca_incidence_ns.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_ns_nodiscount_prs,
m_cost_staging_ns_nodiscount_prs,
t_cost_staging_ns_nodiscount_prs) = outcomes(cost_staging_ns_nodiscount_prs)
cost_staging_ns_discount_prs = cost_staging_ns_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_staging_ns_discount_prs,
m_cost_staging_ns_discount_prs,
t_cost_staging_ns_discount_prs) = outcomes(cost_staging_ns_discount_prs)
cost_staging_nos_nodiscount_prs = (cost_assessment
* psa_stage_adv_nos.T
* pca_incidence_nos.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_nos_nodiscount_prs,
m_cost_staging_nos_nodiscount_prs,
t_cost_staging_nos_nodiscount_prs) = outcomes(cost_staging_nos_nodiscount_prs)
cost_staging_nos_discount_prs = cost_staging_nos_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_staging_nos_discount_prs,
m_cost_staging_nos_discount_prs,
t_cost_staging_nos_discount_prs) = outcomes(cost_staging_nos_discount_prs)
# Total costs of staging
########################
cost_staging_nodiscount_prs = (cost_staging_sc_nodiscount_prs
+ cost_staging_ns_nodiscount_prs
+ cost_staging_nos_nodiscount_prs
+ cost_staging_nos_sc_nodiscount_prs)
(s_cost_staging_nodiscount_prs,
m_cost_staging_nodiscount_prs,
t_cost_staging_nodiscount_prs) = outcomes(cost_staging_nodiscount_prs)
cost_staging_discount_prs = (cost_staging_sc_discount_prs
+ cost_staging_ns_discount_prs
+ cost_staging_nos_discount_prs
+ cost_staging_nos_sc_discount_prs)
(s_cost_staging_discount_prs,
m_cost_staging_discount_prs,
t_cost_staging_discount_prs) = outcomes(cost_staging_discount_prs)
# Cost of treatment in screened arms
(s_cost_tx_sc_nodiscount_prs,
m_cost_tx_sc_nodiscount_prs,
t_cost_tx_sc_nodiscount_prs) = outcomes(costs_tx_sc)
        cost_tx_sc_discount_prs = costs_tx_sc * discount_factor[:total_cycles]
        (s_cost_tx_sc_discount_prs,
         m_cost_tx_sc_discount_prs,
         t_cost_tx_sc_discount_prs) = outcomes(cost_tx_sc_discount_prs)
(s_cost_tx_nos_sc_nodiscount_prs,
m_cost_tx_nos_sc_nodiscount_prs,
t_cost_tx_nos_sc_nodiscount_prs) = outcomes(costs_tx_nos_sc)
        cost_tx_nos_sc_discount_prs = costs_tx_nos_sc * discount_factor[:total_cycles]
        (s_cost_tx_nos_sc_discount_prs,
         m_cost_tx_nos_sc_discount_prs,
         t_cost_tx_nos_sc_discount_prs) = outcomes(cost_tx_nos_sc_discount_prs)
# Cost of treatment in non-screened arms
(s_cost_tx_ns_nodiscount_prs,
m_cost_tx_ns_nodiscount_prs,
t_cost_tx_ns_nodiscount_prs) = outcomes(costs_tx_ns)
        cost_tx_ns_discount_prs = costs_tx_ns * discount_factor[:total_cycles]
        (s_cost_tx_ns_discount_prs,
         m_cost_tx_ns_discount_prs,
         t_cost_tx_ns_discount_prs) = outcomes(cost_tx_ns_discount_prs)
(s_cost_tx_nos_nodiscount_prs,
m_cost_tx_nos_nodiscount_prs,
t_cost_tx_nos_nodiscount_prs) = outcomes(costs_tx_nos)
        cost_tx_nos_discount_prs = costs_tx_nos * discount_factor[:total_cycles]
        (s_cost_tx_nos_discount_prs,
         m_cost_tx_nos_discount_prs,
         t_cost_tx_nos_discount_prs) = outcomes(cost_tx_nos_discount_prs)
# Total costs of treatment
##########################
cost_tx_nodiscount_prs = (costs_tx_sc
+ costs_tx_ns
+ costs_tx_nos
+ costs_tx_nos_sc)
(s_cost_tx_nodiscount_prs,
m_cost_tx_nodiscount_prs,
t_cost_tx_nodiscount_prs) = outcomes(cost_tx_nodiscount_prs)
cost_tx_discount_prs = cost_tx_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_tx_discount_prs,
m_cost_tx_discount_prs,
t_cost_tx_discount_prs) = outcomes(cost_tx_discount_prs)
# Costs of palliation and death in screened arm
cost_eol_sc_nodiscount_prs = (pca_death_costs * pca_death_sc.T).T
(s_cost_eol_sc_nodiscount_prs,
m_cost_eol_sc_nodiscount_prs,
t_cost_eol_sc_nodiscount_prs) = outcomes(cost_eol_sc_nodiscount_prs)
cost_eol_sc_discount_prs = cost_eol_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_eol_sc_discount_prs,
m_cost_eol_sc_discount_prs,
t_cost_eol_sc_discount_prs) = outcomes(cost_eol_sc_discount_prs)
cost_eol_nos_sc_nodiscount_prs = (pca_death_costs * pca_death_nos_sc.T).T
(s_cost_eol_nos_sc_nodiscount_prs,
m_cost_eol_nos_sc_nodiscount_prs,
t_cost_eol_nos_sc_nodiscount_prs) = outcomes(cost_eol_nos_sc_nodiscount_prs)
cost_eol_nos_sc_discount_prs = cost_eol_nos_sc_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_eol_nos_sc_discount_prs,
m_cost_eol_nos_sc_discount_prs,
t_cost_eol_nos_sc_discount_prs) = outcomes(cost_eol_nos_sc_discount_prs)
# Costs of palliation and death in non-screened arm
cost_eol_ns_nodiscount_prs = (pca_death_costs * pca_death_ns.T).T
(s_cost_eol_ns_nodiscount_prs,
m_cost_eol_ns_nodiscount_prs,
t_cost_eol_ns_nodiscount_prs) = outcomes(cost_eol_ns_nodiscount_prs)
cost_eol_ns_discount_prs = cost_eol_ns_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_eol_ns_discount_prs,
m_cost_eol_ns_discount_prs,
t_cost_eol_ns_discount_prs) = outcomes(cost_eol_ns_discount_prs)
cost_eol_nos_nodiscount_prs = (pca_death_costs * pca_death_nos.T).T
(s_cost_eol_nos_nodiscount_prs,
m_cost_eol_nos_nodiscount_prs,
t_cost_eol_nos_nodiscount_prs) = outcomes(cost_eol_nos_nodiscount_prs)
cost_eol_nos_discount_prs = cost_eol_nos_nodiscount_prs * discount_factor[:total_cycles]
(s_cost_eol_nos_discount_prs,
m_cost_eol_nos_discount_prs,
t_cost_eol_nos_discount_prs) = outcomes(cost_eol_nos_discount_prs)
# Total costs of palliation and death
cost_eol_nodiscount_prs = (cost_eol_sc_nodiscount_prs
+ cost_eol_ns_nodiscount_prs
+ cost_eol_nos_nodiscount_prs
+ cost_eol_nos_sc_nodiscount_prs)
(s_cost_eol_nodiscount_prs,
m_cost_eol_nodiscount_prs,
t_cost_eol_nodiscount_prs) = outcomes(cost_eol_nodiscount_prs)
cost_eol_discount_prs = (cost_eol_sc_discount_prs
+ cost_eol_ns_discount_prs
+ cost_eol_nos_discount_prs
+ cost_eol_nos_sc_discount_prs)
(s_cost_eol_discount_prs,
m_cost_eol_discount_prs,
t_cost_eol_discount_prs) = outcomes(cost_eol_discount_prs)
# TOTAL COSTS PRS-BASED SCREENING
#################################
cost_nodiscount_prs = (cost_psa_testing_nodiscount_prs
+ cost_biopsy_nodiscount_prs
+ cost_staging_nodiscount_prs
+ cost_tx_nodiscount_prs
+ cost_eol_nodiscount_prs)
s_cost_nodiscount_prs, m_cost_nodiscount_prs, t_cost_nodiscount_prs = outcomes(cost_nodiscount_prs)
s_cost_nodiscount_prs = s_cost_nodiscount_prs + np.mean(cost_screening_prs)
t_cost_nodiscount_prs = t_cost_nodiscount_prs + np.mean(cost_screening_prs)
cost_discount_prs = (cost_psa_testing_discount_prs
+ cost_biopsy_discount_prs
+ cost_staging_discount_prs
+ cost_tx_discount_prs
+ cost_eol_discount_prs)
s_cost_discount_prs, m_cost_discount_prs, t_cost_discount_prs = outcomes(cost_discount_prs)
s_cost_discount_prs = s_cost_discount_prs + np.mean(cost_screening_prs)
t_cost_discount_prs = t_cost_discount_prs + np.mean(cost_screening_prs)
# Generate a mean dataframe
prs_matrix = [age, m_cases_prs, m_cases_sc_detected_prs,
m_cases_post_screening_prs, m_overdiagnosis_prs, m_deaths_other_prs, m_deaths_pca_prs,
m_pca_alive_prs, m_healthy_prs, m_lyrs_healthy_nodiscount_prs,
m_lyrs_healthy_discount_prs, m_lyrs_pca_discount_prs, m_lyrs_discount_prs,
m_qalys_healthy_discount_prs, m_qalys_pca_discount_prs, m_qalys_discount_prs,
m_cost_psa_testing_discount_prs, m_cost_biopsy_discount_prs, m_cost_staging_discount_prs,
m_cost_tx_discount_prs, m_cost_eol_discount_prs, m_cost_discount_prs]
prs_columns = ['age', 'pca_cases', 'screen-detected cases',
'post-screening cases', 'overdiagnosis', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy', 'lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
prs_cohort = pd.DataFrame(prs_matrix, index = prs_columns).T
t_parameters_prs = [year, t_cases_prs, t_overdiagnosis_prs,
t_deaths_pca_prs, t_deaths_other_prs,
t_lyrs_healthy_discount_prs, t_lyrs_pca_discount_prs,
t_lyrs_nodiscount_prs, t_lyrs_discount_prs, t_qalys_healthy_discount_prs,
t_qalys_pca_discount_prs, t_qalys_nodiscount_prs, t_qalys_discount_prs,
np.mean(cost_screening_prs), t_cost_psa_testing_nodiscount_prs,
t_cost_psa_testing_discount_prs, t_cost_biopsy_nodiscount_prs,
t_cost_biopsy_discount_prs, t_cost_staging_nodiscount_prs,
t_cost_staging_discount_prs, t_cost_tx_nodiscount_prs,
t_cost_tx_discount_prs, t_cost_eol_nodiscount_prs,
t_cost_eol_discount_prs, t_cost_nodiscount_prs, t_cost_discount_prs,
total_n_psa_tests_prs, total_n_biopsies_prs]
columns_prs = ['cohort_age_at_start', 'pca_cases', 'overdiagnosis',
'pca_deaths', 'deaths_other_causes',
'lyrs_healthy_discounted', 'lyrs_pca_discounted',
'lyrs_undiscounted', 'lyrs_discounted','qalys_healthy_discounted',
'qalys_pca_discounted', 'qalys_undiscounted', 'qalys_discounted',
'cost_screening', 'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_treatment_undiscounted','cost_treatment_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted', 'costs_undiscounted', 'costs_discounted',
'n_psa_tests', 'n_biopsies']
outcomes_prs_psa = pd.DataFrame(t_parameters_prs, index = columns_prs).T
parameters_prs = [s_qalys_discount_prs, s_cost_discount_prs,
s_deaths_pca_prs, s_overdiagnosis_prs,
prs_cohort, outcomes_prs_psa]
for index, parameter in enumerate(parameter_list_prs):
parameter = gen_list_outcomes(parameter_list_prs[index], parameters_prs[index])
#######################
# SAVE THE DATAFRAMES #
#######################
# Set path to store outputs of models
path = base_path+(str(np.round(reference_value*100,2)))+"/"
# write the dataframes to an excel file - one sheet for each cohort
def save_excel(list_dataframes, name):
writer = pd.ExcelWriter(name+'.xlsx', engine='xlsxwriter')
for i, df in enumerate(list_dataframes):
df.to_excel(writer,'cohort_%s' % (i+45))
writer.save()
save_excel(ns_cohort_list, path+'non_screening_cohorts_psa')
save_excel(age_cohort_list, path+'age_screening_cohorts_psa')
save_excel(prs_cohort_list, path+'prs_screening_cohorts_psa')
# Save the collated outcome dataframes
outcomes_ns_psa = pd.concat(outcomes_ns_psa_list)
outcomes_age_psa = | pd.concat(outcomes_age_psa_list) | pandas.concat |
"""
Extract basic statistical features from data
"""
import pandas as pd
from utils.timer import Timer
# -------------------------------------------------------------------------------------------------
TRAIN_FILE = 'data/train_preliminary/train.pkl'
TEST_FILE = 'data/test/test.pkl'
TRAIN_STAT_FEAT = 'data/train_feat/train_basic_stat_feat.pkl'
TEST_STAT_FEAT = 'data/test_feat/test_basic_stat_feat.pkl'
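# Aggregated columns that can legitimately be NaN (users with no non-'\N'
# product_id/industry records, or too few active days for duration_std);
# they are filled with zeros further below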
na_cols = [
'product_id_count', 'product_id_nunique', 'industry_count', 'industry_nunique', 'duration_std'
]
dtype = {
'creative_id_count': 'uint32',
'creative_id_nunique': 'uint32',
'ad_id_nunique': 'uint32',
'advertiser_id_nunique': 'uint32',
'product_category_nunique': 'uint32',
'click_times_nunique': 'uint32',
'click_times_max': 'uint8',
'click_times_sum': 'uint32',
'click_times_mean': 'float64',
'click_times_std': 'float64',
'time_nunique': 'uint32',
'time_min': 'uint8',
'time_max': 'uint8',
'product_id_count': 'uint32',
'product_id_nunique': 'uint32',
'industry_count': 'uint32',
'industry_nunique': 'uint32',
'duration_nunique': 'uint32',
'duration_min': 'uint8',
'duration_max': 'uint8',
'duration_mean': 'float64',
'duration_median': 'float64',
'duration_std': 'float64',
'creative_id_count_bin_10': 'uint8',
'creative_id_nunique_bin_10': 'uint8',
'ad_id_nunique_bin_10': 'uint8',
'advertiser_id_nunique_bin_10': 'uint8',
'product_category_nunique_bin_10': 'uint8',
'product_id_count_bin_10': 'uint8',
'product_id_nunique_bin_10': 'uint8',
'industry_count_bin_10': 'uint8',
'industry_nunique_bin_10': 'uint8',
'click_times_max_lt_1': 'uint8',
'click_times_sum_bin_10': 'uint8',
'click_times_mean_bin_2': 'uint8',
'click_times_std_bin_2': 'uint8',
'time_nunique_bin_10': 'uint8',
'time_min_bin_4': 'uint8',
'time_max_bin_2': 'uint8',
'duration_nunique_bin_4': 'uint8',
'duration_min_lt_1': 'uint8',
'duration_max_bin_10': 'uint8',
'duration_mean_bin_10': 'uint8',
'duration_median_bin_4': 'uint8',
'duration_std_bin_10': 'uint8'
}
timer = Timer()
# -------------------------------------------------------------------------------------------------
print('Loading train and test data...')
timer.start()
train = pd.read_pickle(TRAIN_FILE)
test = pd.read_pickle(TEST_FILE)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Generate basic statistical features')
timer.start()
train_stat_basic = pd.DataFrame()
test_stat_basic = pd.DataFrame()
# general
temp = train.groupby('user_id').agg({
'creative_id': ['count', 'nunique'],
'ad_id': ['nunique'],
'advertiser_id': ['nunique'],
'product_category': ['nunique'],
'click_times': ['nunique', 'max', 'sum', 'mean', 'std'],
'time': ['nunique', 'min', 'max']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.groupby('user_id').agg({
'creative_id': ['count', 'nunique'],
'ad_id': ['nunique'],
'advertiser_id': ['nunique'],
'product_category': ['nunique'],
'click_times': ['nunique', 'max', 'sum', 'mean', 'std'],
'time': ['nunique', 'min', 'max']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# product_id
temp = train.loc[train['product_id'] != '\\N'].groupby('user_id').agg({
'product_id': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.loc[test['product_id'] != '\\N'].groupby('user_id').agg({
'product_id': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# industry
temp = train.loc[train['industry'] != '\\N'].groupby('user_id').agg({
'industry': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
temp = test.loc[test['industry'] != '\\N'].groupby('user_id').agg({
'industry': ['count', 'nunique']
})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Generate statistical features based on click date duration...')
timer.start()
# drop all columns except user_id and time
# since only time duration will be taken into consideration
# keep one click-log record per day
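# e.g. a user active on days [1, 3, 7] gets next_time [3, 7, NaN] and durations [2, 4]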
train = train.loc[:, ['user_id', 'time']].drop_duplicates().sort_values(['user_id', 'time'])
test = test.loc[:, ['user_id', 'time']].drop_duplicates().sort_values(['user_id', 'time'])
# create time duration statistical features
train['next_time'] = train.groupby('user_id')['time'].shift(-1)
temp = train.groupby('user_id').size()
train.loc[train['user_id'].isin(temp[temp == 1].index), 'next_time'] = 0
train = train.loc[train['next_time'].notna()]
train = train.astype({'next_time': 'uint8'})
train['duration'] = train['next_time'] - train['time']
temp = train.groupby('user_id').agg({'duration': ['nunique', 'min', 'max', 'mean', 'median', 'std']})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
train_stat_basic = pd.concat([train_stat_basic, temp], axis=1)
test['next_time'] = test.groupby('user_id')['time'].shift(-1)
temp = test.groupby('user_id').size()
test.loc[test['user_id'].isin(temp[temp == 1].index), 'next_time'] = 0
test = test.loc[test['next_time'].notna()]
test = test.astype({'next_time': 'uint8'})
test['duration'] = test['next_time'] - test['time']
temp = test.groupby('user_id').agg({'duration': ['nunique', 'min', 'max', 'mean', 'median', 'std']})
temp.columns = ["_".join(x) for x in temp.columns.ravel()]
test_stat_basic = pd.concat([test_stat_basic, temp], axis=1)
# fill nan values with zeros
train_stat_basic.loc[:, na_cols] = train_stat_basic.loc[:, na_cols].fillna(0)
test_stat_basic.loc[:, na_cols] = test_stat_basic.loc[:, na_cols].fillna(0)
timer.stop()
# -------------------------------------------------------------------------------------------------
print('Bucketing continuous features...')
timer.start()
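# pd.qcut assigns each value to one of q (roughly) equal-frequency buckets;
# .cat.codes turns the resulting ordered categories into integer codes 0..q-1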
train_stat_basic['creative_id_count_bin_10'] = pd.qcut(train_stat_basic['creative_id_count'], q=10).cat.codes
train_stat_basic['creative_id_nunique_bin_10'] = pd.qcut(train_stat_basic['creative_id_nunique'], q=10).cat.codes
train_stat_basic['ad_id_nunique_bin_10'] = pd.qcut(train_stat_basic['ad_id_nunique'], q=10).cat.codes
train_stat_basic['advertiser_id_nunique_bin_10'] = pd.qcut(train_stat_basic['advertiser_id_nunique'], q=10).cat.codes
train_stat_basic['product_category_nunique_bin_10'] = pd.qcut(train_stat_basic['product_category_nunique'], q=4).cat.codes
train_stat_basic['product_id_count_bin_10'] = pd.qcut(train_stat_basic['product_id_count'], q=10).cat.codes
train_stat_basic['product_id_nunique_bin_10'] = pd.qcut(train_stat_basic['product_id_nunique'], q=10).cat.codes
train_stat_basic['industry_count_bin_10'] = pd.qcut(train_stat_basic['industry_count'], q=10).cat.codes
train_stat_basic['industry_nunique_bin_10'] = pd.qcut(train_stat_basic['industry_nunique'], q=10).cat.codes
train_stat_basic['click_times_max_lt_1'] = train_stat_basic['click_times_max'].map(lambda s: 0 if s <= 1 else 1)
train_stat_basic['click_times_sum_bin_10'] = pd.qcut(train_stat_basic['click_times_sum'], q=10).cat.codes
train_stat_basic['click_times_mean_bin_2'] = pd.qcut(train_stat_basic['click_times_mean'], q=2).cat.codes
train_stat_basic['click_times_std_bin_2'] = pd.qcut(train_stat_basic['click_times_std'], q=2).cat.codes
train_stat_basic['time_nunique_bin_10'] = pd.qcut(train_stat_basic['time_nunique'], q=10).cat.codes
train_stat_basic['time_min_bin_4'] = pd.qcut(train_stat_basic['time_min'], q=4).cat.codes
train_stat_basic['time_max_bin_2'] = pd.qcut(train_stat_basic['time_max'], q=2).cat.codes
train_stat_basic['duration_nunique_bin_4'] = pd.qcut(train_stat_basic['duration_nunique'], q=4).cat.codes
train_stat_basic['duration_min_lt_1'] = train_stat_basic['duration_min'].map(lambda s: 0 if s <= 1 else 1)
train_stat_basic['duration_max_bin_10'] = | pd.qcut(train_stat_basic['duration_max'], q=10) | pandas.qcut |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Alignments
#
# This notebook analyzes page alignments and prepares metrics for final use.
# %% [markdown]
# ## Setup
#
# We begin by loading necessary libraries:
# %%
from pathlib import Path
import pandas as pd
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gzip
import pickle
import binpickle
from natural.size import binarysize
# %%
codec = binpickle.codecs.Blosc('zstd')
# %% [markdown]
# Set up progress bar and logging support:
# %%
from tqdm.auto import tqdm
tqdm.pandas(leave=False)
# %%
import sys, logging
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log = logging.getLogger('alignment')
# %% [markdown]
# Import metric code:
# %%
# %load_ext autoreload
# %autoreload 1
# %%
# %aimport metrics
from trecdata import scan_runs
# %% [markdown]
# ## Loading Data
#
# We first load the page metadata:
# %%
pages = pd.read_json('data/trec_metadata_eval.json.gz', lines=True)
pages = pages.drop_duplicates('page_id')
pages.info()
# %% [markdown]
# Now we will load the evaluation topics:
# %%
eval_topics = pd.read_json('data/eval-topics-with-qrels.json.gz', lines=True)
eval_topics.info()
# %%
train_topics = pd.read_json('data/trec_topics.json.gz', lines=True)
train_topics.info()
# %% [markdown]
# Train and eval topics use a disjoint set of IDs:
# %%
train_topics['id'].describe()
# %%
eval_topics['id'].describe()
# %% [markdown]
# This allows us to create a single, integrated topics list for convenience:
# %%
topics = pd.concat([train_topics, eval_topics], ignore_index=True)
topics['eval'] = False
topics.loc[topics['id'] >= 100, 'eval'] = True
topics.head()
# %% [markdown]
# Finally, a bit of hard-coded data - the world population:
# %%
world_pop = pd.Series({
'Africa': 0.155070563,
'Antarctica': 1.54424E-07,
'Asia': 0.600202585,
'Europe': 0.103663858,
'Latin America and the Caribbean': 0.08609797,
'Northern America': 0.049616733,
'Oceania': 0.005348137,
})
world_pop.name = 'geography'
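# %% [markdown]
# As a quick sanity check (mirroring the gender target check below), these shares should sum to approximately 1:
# %%
world_pop.sum()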
# %% [markdown]
# And a gender global target:
# %%
gender_tgt = pd.Series({
'female': 0.495,
'male': 0.495,
'third': 0.01
})
gender_tgt.name = 'gender'
gender_tgt.sum()
# %% [markdown]
# Xarray intesectional global target:
# %%
geo_tgt_xa = xr.DataArray(world_pop, dims=['geography'])
gender_tgt_xa = xr.DataArray(gender_tgt, dims=['gender'])
int_tgt = geo_tgt_xa * gender_tgt_xa
int_tgt
# %% [markdown]
# And the order of work-needed codes:
# %%
work_order = [
'Stub',
'Start',
'C',
'B',
'GA',
'FA',
]
# %% [markdown]
# ## Query Relevance
#
# We now need to get the qrels for the topics. This is done by creating frames with entries for every relevant document; missing documents are assumed irrelevant (0).
#
# First the training topics:
# %%
train_qrels = train_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True)
train_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True)
train_qrels['page_id'] = train_qrels['page_id'].astype('i4')
train_qrels = train_qrels.drop_duplicates()
train_qrels.head()
# %%
eval_qrels = eval_topics[['id', 'rel_docs']].explode('rel_docs', ignore_index=True)
eval_qrels.rename(columns={'rel_docs': 'page_id'}, inplace=True)
eval_qrels['page_id'] = eval_qrels['page_id'].astype('i4')
eval_qrels = eval_qrels.drop_duplicates()
eval_qrels.head()
# %% [markdown]
# And concatenate:
# %%
qrels = pd.concat([train_qrels, eval_qrels], ignore_index=True)
# %% [markdown]
# ## Page Alignments
#
# All of our metrics require page "alignments": the protected-group membership of each page.
# %% [markdown]
# ### Geography
#
# Let's start with the straight page geography alignment for the public evaluation of the training queries. The page metadata has that; let's get the geography column.
# %%
page_geo = pages[['page_id', 'geographic_locations']].explode('geographic_locations', ignore_index=True)
page_geo.head()
# %% [markdown]
# And we will now pivot this into a matrix so we get page alignment vectors:
# %%
page_geo_align = page_geo.assign(x=1).pivot(index='page_id', columns='geographic_locations', values='x')
page_geo_align.rename(columns={np.nan: 'Unknown'}, inplace=True)
page_geo_align.fillna(0, inplace=True)
page_geo_align.head()
# %% [markdown]
# And convert this to an xarray for multidimensional usage:
# %%
page_geo_xr = xr.DataArray(page_geo_align, dims=['page', 'geography'])
page_geo_xr
# %%
binarysize(page_geo_xr.nbytes)
# %% [markdown]
# ### Gender
#
# The "undisclosed personal attribute" is gender. Not all articles have gender as a relevant variable - articles not about a living being generally will not.
#
# We're going to follow the same approach for gender:
# %%
page_gender = pages[['page_id', 'gender']].explode('gender', ignore_index=True)
page_gender.fillna('unknown', inplace=True)
page_gender.head()
# %% [markdown]
# We need to do a little targeted repair - one record erroneously lists "Taira no Kiyomori" as a gender value; the subject is actually male. Remove that record:
# %%
page_gender = page_gender.loc[page_gender['gender'] != 'Taira no Kiyomori']
# %% [markdown]
# Now, we're going to do a little more work to reduce the dimensionality of the space. Points:
#
# 1. Trans men are men
# 2. Trans women are women
# 3. Cisgender is an adjective that can be dropped for the present purposes
#
# The result is that we will collapse "transgender female" and "cisgender female" into "female".
#
# The **downside** to this is that trans men are probably significantly under-represented, but are now being collapsed into the dominant group.
# %%
pgcol = page_gender['gender']
pgcol = pgcol.str.replace(r'(?:tran|ci)sgender\s+((?:fe)?male)', r'\1', regex=True)
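# %% [markdown]
# As a quick, illustrative check (not part of the pipeline), the replacement collapses the cis/trans prefixes on a few made-up strings:
# %%
pd.Series(['transgender female', 'cisgender male', 'female']).str.replace(r'(?:tran|ci)sgender\s+((?:fe)?male)', r'\1', regex=True)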
# %% [markdown]
# Now, we're going to group the remaining gender identities together under the label 'third'. As noted above, this is a debatable exercise that collapses a lot of identity.
# %%
genders = ['unknown', 'male', 'female', 'third']
pgcol[~pgcol.isin(genders)] = 'third'
# %% [markdown]
# Now put this column back in the frame and deduplicate.
# %%
page_gender['gender'] = pgcol
page_gender = page_gender.drop_duplicates()
# %% [markdown]
# And make an alignment matrix (reordering so 'unknown' is first for consistency):
# %%
page_gend_align = page_gender.assign(x=1).pivot(index='page_id', columns='gender', values='x')
page_gend_align.fillna(0, inplace=True)
page_gend_align = page_gend_align.reindex(columns=['unknown', 'female', 'male', 'third'])
page_gend_align.head()
# %% [markdown]
# Let's see how frequent each of the genders is:
# %%
page_gend_align.sum(axis=0).sort_values(ascending=False)
# %% [markdown]
# And convert to an xarray:
# %%
page_gend_xr = xr.DataArray(page_gend_align, dims=['page', 'gender'])
page_gend_xr
# %%
binarysize(page_gend_xr.nbytes)
# %% [markdown]
# ### Intersectional Alignment
#
# We'll now convert this data array to an **intersectional** alignment array:
# %%
page_xalign = page_geo_xr * page_gend_xr
page_xalign
# %%
binarysize(page_xalign.nbytes)
# %% [markdown]
# Make sure that did the right thing and we have intersectional numbers:
# %%
page_xalign.sum(axis=0)
# %% [markdown]
# And make sure combination with targets work as expected:
# %%
(page_xalign.sum(axis=0) + int_tgt) * 0.5
# %% [markdown]
# ## Task 1 Metric Preparation
#
# Now that we have our alignments and qrels, we are ready to prepare the Task 1 metrics.
#
# Task 1 ignores the "unknown" alignment category, so we're going to create a `kga` frame (for **K**nown **G**eographic **A**lignment), and corresponding frames for intersectional alignment.
# %%
page_kga = page_geo_align.iloc[:, 1:]
page_kga.head()
# %% [markdown]
# Intersectional is a little harder to do, because things can be **intersectionally unknown**: we may know gender but not geography, or vice versa. To deal with these missing values for Task 1, we're going to ignore *totally unknown* values, but keep partially-known as a category.
#
# We also need to ravel our tensors into a matrix for compatibility with the metric code. Since 'unknown' is the first value on each axis, we can ravel, and then drop the first column.
# %%
xshp = page_xalign.shape
xshp = (xshp[0], xshp[1] * xshp[2])
page_xa_df = pd.DataFrame(page_xalign.values.reshape(xshp), index=page_xalign.indexes['page'])
page_xa_df.head()
# %% [markdown]
# And drop unknown, to get our page alignment vectors:
# %%
page_kia = page_xa_df.iloc[:, 1:]
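# %% [markdown]
# As an illustrative sanity check: the raveled column index for a (geography, gender) pair is `geo_index * n_genders + gender_index`, so column 0 is the fully-unknown cell and every remaining column keeps at least one known attribute:
# %%
n_geo = page_xalign.sizes['geography']
n_gender = page_xalign.sizes['gender']
assert page_xa_df.shape[1] == n_geo * n_gender
assert page_kia.shape[1] == n_geo * n_gender - 1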
# %% [markdown]
# ### Geographic Alignment
#
# We'll start with the metric configuration for public training data, considering only geographic alignment. We configure the metric to do this for both the training and the eval queries.
#
# #### Training Queries
# %%
train_qalign = train_qrels.join(page_kga, on='page_id').drop(columns=['page_id']).groupby('id').sum()
tqa_sums = train_qalign.sum(axis=1)
train_qalign = train_qalign.divide(tqa_sums, axis=0)
# %%
train_qalign.head()
# %%
train_qtarget = (train_qalign + world_pop) * 0.5
train_qtarget.head()
# %% [markdown]
# And we can prepare a metric and save it:
# %%
t1_train_metric = metrics.Task1Metric(train_qrels.set_index('id'), page_kga, train_qtarget)
binpickle.dump(t1_train_metric, 'task1-train-geo-metric.bpk', codec=codec)
# %% [markdown]
# #### Eval Queries
#
# Do the same thing for the eval data for a geo-only eval metric:
# %%
eval_qalign = eval_qrels.join(page_kga, on='page_id').drop(columns=['page_id']).groupby('id').sum()
eqa_sums = eval_qalign.sum(axis=1)
eval_qalign = eval_qalign.divide(eqa_sums, axis=0)
eval_qtarget = (eval_qalign + world_pop) * 0.5
t1_eval_metric = metrics.Task1Metric(eval_qrels.set_index('id'), page_kga, eval_qtarget)
binpickle.dump(t1_eval_metric, 'task1-eval-geo-metric.bpk', codec=codec)
# %% [markdown]
# ### Intersectional Alignment
#
# Now we need to apply similar logic, but for the intersectional (geography * gender) alignment.
#
# As noted above, we need to carefully handle the unknown cases.
# %% [markdown]
# #### Demo
#
# To demonstrate how the logic works, let's first work it out in cells for one query (1).
#
# What are its documents?
# %%
qdf = qrels[qrels['id'] == 1]
qdf.name = 1
qdf
# %% [markdown]
# We can use these page IDs to get its alignments:
# %%
q_xa = page_xalign.loc[qdf['page_id'].values, :, :]
q_xa
# %% [markdown]
# Summing over the first axis ('page') will produce an alignment matrix:
# %%
q_am = q_xa.sum(axis=0)
q_am
# %% [markdown]
# Now we need to reset the (0,0) coordinate (fully unknown) and normalize to a proportion.
# %%
q_am[0, 0] = 0
q_am = q_am / q_am.sum()
q_am
# %% [markdown]
# Ok, now we have to - very carefully - average with our target modifier. There are three groups:
#
# - known (use intersectional target)
# - known-geo (use geo target)
# - known-gender (use gender target)
#
# For each of these, we need to respect the fraction of the total it represents. Let's compute those fractions:
# %%
q_fk_all = q_am[1:, 1:].sum()
q_fk_geo = q_am[1:, :1].sum()
q_fk_gen = q_am[:1, 1:].sum()
q_fk_all, q_fk_geo, q_fk_gen
# %% [markdown]
# And now do some surgery. Weighted-average to incorporate the target for fully-known:
# %%
q_tm = q_am.copy()
q_tm[1:, 1:] *= 0.5
q_tm[1:, 1:] += int_tgt * 0.5 * q_fk_all
q_tm
# %% [markdown]
# And for known-geo:
# %%
q_tm[1:, :1] *= 0.5
q_tm[1:, :1] += geo_tgt_xa * 0.5 * q_fk_geo
# %% [markdown]
# And known-gender:
# %%
q_tm[:1, 1:] *= 0.5
q_tm[:1, 1:] += gender_tgt_xa * 0.5 * q_fk_gen
# %%
q_tm
# %% [markdown]
# Now we can unravel this and drop the first entry:
# %%
q_tm.values.ravel()[1:]
# %% [markdown]
# #### Implementation
#
# Now, to do this for every query, we'll use a function that takes a data frame for a query's relevant docs and performs all of the above operations:
# %%
def query_xalign(qdf):
pages = qdf['page_id']
pages = pages[pages.isin(page_xalign.indexes['page'])]
q_xa = page_xalign.loc[pages.values, :, :]
q_am = q_xa.sum(axis=0)
# clear and normalize
q_am[0, 0] = 0
q_am = q_am / q_am.sum()
# compute fractions in each section
q_fk_all = q_am[1:, 1:].sum()
q_fk_geo = q_am[1:, :1].sum()
q_fk_gen = q_am[:1, 1:].sum()
# known average
q_am[1:, 1:] *= 0.5
q_am[1:, 1:] += int_tgt * 0.5 * q_fk_all
# known-geo average
q_am[1:, :1] *= 0.5
q_am[1:, :1] += geo_tgt_xa * 0.5 * q_fk_geo
# known-gender average
q_am[:1, 1:] *= 0.5
q_am[:1, 1:] += gender_tgt_xa * 0.5 * q_fk_gen
# and return the result
return pd.Series(q_am.values.ravel()[1:])
# %%
query_xalign(qdf)
# %% [markdown]
# Now with that function, we can compute the alignment vector for each query.
# %%
train_qtarget = train_qrels.groupby('id').apply(query_xalign)
train_qtarget
# %% [markdown]
# And save:
# %%
t1_train_metric = metrics.Task1Metric(train_qrels.set_index('id'), page_kia, train_qtarget)
binpickle.dump(t1_train_metric, 'task1-train-metric.bpk', codec=codec)
# %% [markdown]
# Do the same for eval:
# %%
eval_qtarget = eval_qrels.groupby('id').apply(query_xalign)
t1_eval_metric = metrics.Task1Metric(eval_qrels.set_index('id'), page_kia, eval_qtarget)
binpickle.dump(t1_eval_metric, 'task1-eval-metric.bpk', codec=codec)
# %% [markdown]
# ## Task 2 Metric Preparation
#
# Task 2 requires some different preparation.
#
# We're going to start by computing work-needed information:
# %%
page_work = pages.set_index('page_id').quality_score_disc.astype( | pd.CategoricalDtype(ordered=True) | pandas.CategoricalDtype |
import numpy as np
import pandas as pd
def fetch_students():
    ''' Fetches the two dataset csv files and concatenates them '''
student_mat = pd.read_csv("dataset/student-mat.csv")
student_por = pd.read_csv("dataset/student-por.csv")
students = pd.concat([student_mat, student_por])
return students
def create_data_for_nn(students_dataframe):
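    ''' Builds normalised numeric and one-hot encoded input features (and output data) for the neural network from the students dataframe '''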
input_data = pd.DataFrame()
output_data = pd.DataFrame()
# Input data
# Numerical
# Age
input_data['age'] = pd.Series(
data=students_dataframe['age'].values, index=students_dataframe.index)
# Absences count
input_data['absences'] = pd.Series(
data=students_dataframe['absences'].values, index=students_dataframe.index)
# Family relationship status [bad to good -> 0-1]
input_data['famrel'] = pd.Series(
data=((students_dataframe['famrel'].values - 1) / 4), index=students_dataframe.index)
# Health status [bad to good -> 0-1]
input_data['health'] = pd.Series(
data=((students_dataframe['health'].values - 1) / 4), index=students_dataframe.index)
# Free time after school [0-1]
input_data['freetime'] = pd.Series(
data=((students_dataframe['freetime'].values - 1) / 4), index=students_dataframe.index)
# Going out with friends [0-1]
input_data['goout'] = pd.Series(
data=((students_dataframe['goout'].values - 1) / 4), index=students_dataframe.index)
# Travel time in minutes [0 to 60+ minutes -> 0 to 1]
input_data['traveltime'] = pd.Series(
data=((students_dataframe['traveltime'].values) / 4), index=students_dataframe.index)
# Weekly study time in hours [0 to 10+ hours -> 0 to 1]
input_data['studytime'] = pd.Series(
data=((students_dataframe['studytime'].values) / 4), index=students_dataframe.index)
# Number of past class failures [0 to 4+ failures -> 0 to 1]
input_data['failures'] = pd.Series(
data=((students_dataframe['failures'].values) / 4), index=students_dataframe.index)
# School success [Bad to good -> 0-1]
    # The average of G1, G2 and G3 divided by 20 is used as school success from 0 to 1
input_data['success'] = pd.Series(data=(students_dataframe['G1'] +
students_dataframe['G2'] +
students_dataframe['G3']) / 3 / 20,
index=students_dataframe.index)
# Mother education status
# [None, Primary education (4th grade), 5th to 9th grade, Secondary education, Higher education]
input_data['Medu_none'] = pd.Series(data=0, index=students_dataframe.index)
input_data['Medu_primary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_fivenine'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_secondary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Medu_higher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Medu'] == 0, 'Medu_none'] = 1
input_data.loc[students_dataframe['Medu'] == 1, 'Medu_primary'] = 1
input_data.loc[students_dataframe['Medu'] == 2, 'Medu_fivenine'] = 1
input_data.loc[students_dataframe['Medu'] == 3, 'Medu_secondary'] = 1
input_data.loc[students_dataframe['Medu'] == 4, 'Medu_higher'] = 1
# Father education status
# [None, Primary education (4th grade), 5th to 9th grade, Secondary education, Higher education]
input_data['Fedu_none'] = pd.Series(data=0, index=students_dataframe.index)
input_data['Fedu_primary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_fivenine'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_secondary'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fedu_higher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Fedu'] == 0, 'Fedu_none'] = 1
input_data.loc[students_dataframe['Fedu'] == 1, 'Fedu_primary'] = 1
input_data.loc[students_dataframe['Fedu'] == 2, 'Fedu_fivenine'] = 1
input_data.loc[students_dataframe['Fedu'] == 3, 'Fedu_secondary'] = 1
input_data.loc[students_dataframe['Fedu'] == 4, 'Fedu_higher'] = 1
# Mother's job
# [Teacher, Health care related, Civil services, At home, Other]
input_data['Mjob_teacher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_health'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_civilser'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_athome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Mjob_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Mjob'] == 'teacher', 'Mjob_teacher'] = 1
input_data.loc[students_dataframe['Mjob'] == 'health', 'Mjob_health'] = 1
input_data.loc[students_dataframe['Mjob']
== 'services', 'Mjob_civilser'] = 1
input_data.loc[students_dataframe['Mjob'] == 'at_home', 'Mjob_athome'] = 1
input_data.loc[students_dataframe['Mjob'] == 'other', 'Mjob_other'] = 1
# Father's job
# [Teacher, Health care related, Civil services, At home, Other]
input_data['Fjob_teacher'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_health'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_civilser'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_athome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['Fjob_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Fjob'] == 'teacher', 'Fjob_teacher'] = 1
input_data.loc[students_dataframe['Fjob'] == 'health', 'Fjob_health'] = 1
input_data.loc[students_dataframe['Fjob']
== 'services', 'Fjob_civilser'] = 1
input_data.loc[students_dataframe['Fjob'] == 'at_home', 'Fjob_athome'] = 1
input_data.loc[students_dataframe['Fjob'] == 'other', 'Fjob_other'] = 1
    # Reason to choose this school
# [ Close to home, School reputation, Course preference, Other ]
input_data['reason_closehome'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_rep'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_pref'] = pd.Series(
data=0, index=students_dataframe.index)
input_data['reason_other'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['reason']
== 'home', 'reason_closehome'] = 1
input_data.loc[students_dataframe['reason']
== 'reputation', 'reason_rep'] = 1
input_data.loc[students_dataframe['reason'] == 'course', 'reason_pref'] = 1
input_data.loc[students_dataframe['reason'] == 'other', 'reason_other'] = 1
# One hot
# Sex [M(Male) = 0, F(Female) = 1]
input_data['sex'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['sex'] == 'F', 'sex'] = 1
# Address [R(Rural) = 0, U(Urban) = 1]
input_data['address'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['address'] == 'U', 'address'] = 1
# Family size [LE3(Less or equal than 3) = 0, GT3(Greater than 3) = 1]
input_data['famsize'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['famsize'] == 'GT3', 'famsize'] = 1
# Parent cohabitation status [T(Together) = 0, A(Apart) = 1]
input_data['Pstatus'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['Pstatus'] == 'A', 'Pstatus'] = 1
# Extra educational support [no = 0, yes = 1]
input_data['schoolsup'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['schoolsup'] == 'yes', 'schoolsup'] = 1
# Family educational support [no = 0, yes = 1]
input_data['famsup'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['famsup'] == 'yes', 'famsup'] = 1
    # Extracurricular activities [no = 0, yes = 1]
input_data['activities'] = pd.Series(
data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['activities'] == 'yes', 'activities'] = 1
    # Attended nursery school [no = 0, yes = 1]
input_data['nursery'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['nursery'] == 'yes', 'nursery'] = 1
# Wants higher education [no = 0, yes = 1]
input_data['higher'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['higher'] == 'yes', 'higher'] = 1
# Internet access at home [no = 0, yes = 1]
input_data['internet'] = pd.Series(data=0, index=students_dataframe.index)
input_data.loc[students_dataframe['internet'] == 'yes', 'internet'] = 1
# Has romantic relationship [no = 0, yes = 1]
input_data['romantic'] = | pd.Series(data=0, index=students_dataframe.index) | pandas.Series |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import (
AutoMLDataChecks,
DataCheck,
DataCheckError,
DataCheckMessageCode,
DataChecks,
DataCheckWarning,
DefaultDataChecks,
EmptyDataChecks
)
from evalml.exceptions import DataCheckInitError
def test_data_checks(X_y_binary):
X, y = X_y_binary
class MockDataCheck(DataCheck):
def validate(self, X, y):
return {"warnings": [], "errors": []}
class MockDataCheckWarning(DataCheck):
def validate(self, X, y):
return {"warnings": [DataCheckWarning(message="warning one", data_check_name=self.name, message_code=None).to_dict()], "errors": []}
class MockDataCheckError(DataCheck):
def validate(self, X, y):
return {"warnings": [], "errors": [DataCheckError(message="error one", data_check_name=self.name, message_code=None).to_dict()]}
class MockDataCheckErrorAndWarning(DataCheck):
def validate(self, X, y):
return {"warnings": [DataCheckWarning(message="warning two", data_check_name=self.name, message_code=None).to_dict()],
"errors": [DataCheckError(message="error two", data_check_name=self.name, message_code=None).to_dict()]}
data_checks_list = [MockDataCheck, MockDataCheckWarning, MockDataCheckError, MockDataCheckErrorAndWarning]
data_checks = DataChecks(data_checks=data_checks_list)
assert data_checks.validate(X, y) == {
"warnings": [DataCheckWarning(message="warning one", data_check_name="MockDataCheckWarning").to_dict(),
DataCheckWarning(message="warning two", data_check_name="MockDataCheckErrorAndWarning").to_dict()],
"errors": [DataCheckError(message="error one", data_check_name="MockDataCheckError").to_dict(),
DataCheckError(message="error two", data_check_name="MockDataCheckErrorAndWarning").to_dict()]
}
@pytest.mark.parametrize("input_type", ["pd", "ww", "np"])
def test_empty_data_checks(input_type, X_y_binary):
X, y = X_y_binary
if input_type != "np":
X = pd.DataFrame(X)
y = pd.Series(y)
if input_type == "ww":
X = ww.DataTable(X)
y = ww.DataColumn(y)
data_checks = EmptyDataChecks()
assert data_checks.validate(X, y) == {"warnings": [], "errors": []}
messages = [DataCheckWarning(message="Column 'all_null' is 95.0% or more null",
data_check_name="HighlyNullDataCheck",
message_code=DataCheckMessageCode.HIGHLY_NULL,
details={"column": "all_null"}).to_dict(),
DataCheckWarning(message="Column 'also_all_null' is 95.0% or more null",
data_check_name="HighlyNullDataCheck",
message_code=DataCheckMessageCode.HIGHLY_NULL,
details={"column": "also_all_null"}).to_dict(),
DataCheckWarning(message="Column 'id' is 100.0% or more likely to be an ID column",
data_check_name="IDColumnsDataCheck",
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"column": "id"}).to_dict(),
DataCheckError(message="1 row(s) (20.0%) of target values are null",
data_check_name="InvalidTargetDataCheck",
message_code=DataCheckMessageCode.TARGET_HAS_NULL,
details={"num_null_rows": 1, "pct_null_rows": 20.0}).to_dict(),
DataCheckError(message="lots_of_null has 1 unique value.",
data_check_name="NoVarianceDataCheck",
message_code=DataCheckMessageCode.NO_VARIANCE,
details={"column": "lots_of_null"}).to_dict(),
DataCheckError(message="all_null has 0 unique value.",
data_check_name="NoVarianceDataCheck",
message_code=DataCheckMessageCode.NO_VARIANCE,
details={"column": "all_null"}).to_dict(),
DataCheckError(message="also_all_null has 0 unique value.",
data_check_name="NoVarianceDataCheck",
message_code=DataCheckMessageCode.NO_VARIANCE,
details={"column": "also_all_null"}).to_dict()]
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_default_data_checks_classification(input_type):
X = pd.DataFrame({'lots_of_null': [None, None, None, None, "some data"],
'all_null': [None, None, None, None, None],
'also_all_null': [None, None, None, None, None],
'no_null': [1, 2, 3, 4, 5],
'id': [0, 1, 2, 3, 4],
'has_label_leakage': [100, 200, 100, 200, 100]})
y = pd.Series([0, 1, np.nan, 1, 0])
y_multiclass = pd.Series([0, 1, np.nan, 2, 0])
if input_type == "ww":
X = ww.DataTable(X)
y = ww.DataColumn(y)
y_multiclass = ww.DataColumn(y_multiclass)
data_checks = DefaultDataChecks("binary", get_default_primary_search_objective("binary"))
imbalance = [DataCheckError(message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [1.0, 0.0]",
data_check_name="ClassImbalanceDataCheck",
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [1.0, 0.0]}).to_dict()]
assert data_checks.validate(X, y) == {"warnings": messages[:3], "errors": messages[3:] + imbalance}
data_checks = DataChecks(DefaultDataChecks._DEFAULT_DATA_CHECK_CLASSES,
{"InvalidTargetDataCheck": {"problem_type": "binary",
"objective": get_default_primary_search_objective("binary")}})
assert data_checks.validate(X, y) == {"warnings": messages[:3], "errors": messages[3:]}
# multiclass
imbalance = [DataCheckError(message="The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0.0, 2.0, 1.0]",
data_check_name="ClassImbalanceDataCheck",
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": [0.0, 2.0, 1.0]}).to_dict()]
min_2_class_count = [DataCheckError(message="Target does not have at least two instances per class which is required for multiclass classification",
data_check_name="InvalidTargetDataCheck",
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_EXAMPLES_PER_CLASS,
details={"least_populated_class_labels": [2.0, 1.0]}).to_dict()]
high_class_to_sample_ratio = [DataCheckWarning(
message="Target has a large number of unique values, could be regression type problem.",
data_check_name="InvalidTargetDataCheck",
message_code=DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS,
details={'class_to_value_ratio': 0.6}).to_dict()]
# multiclass
data_checks = DefaultDataChecks("multiclass", get_default_primary_search_objective("multiclass"))
assert data_checks.validate(X, y_multiclass) == {"warnings": messages[:3] + high_class_to_sample_ratio, "errors": [messages[3]] + min_2_class_count + messages[4:] + imbalance}
data_checks = DataChecks(DefaultDataChecks._DEFAULT_DATA_CHECK_CLASSES,
{"InvalidTargetDataCheck": {"problem_type": "multiclass",
"objective": get_default_primary_search_objective("multiclass")}})
assert data_checks.validate(X, y_multiclass) == {"warnings": messages[:3] + high_class_to_sample_ratio, "errors": [messages[3]] + min_2_class_count + messages[4:]}
@pytest.mark.parametrize("input_type", ["pd", "ww"])
def test_default_data_checks_regression(input_type):
X = pd.DataFrame({'lots_of_null': [None, None, None, None, "some data"],
'all_null': [None, None, None, None, None],
'also_all_null': [None, None, None, None, None],
'no_null': [1, 2, 3, 5, 5],
'id': [0, 1, 2, 3, 4],
'has_label_leakage': [100, 200, 100, 200, 100]})
y = pd.Series([0.3, 100.0, np.nan, 1.0, 0.2])
y_no_variance = | pd.Series([5] * 5) | pandas.Series |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
        # Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
        ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
import os
from zipfile import ZipFile
import numpy as np
import pandas as pd
import pydicom
import tensorflow as tf
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from RetinaNet import retinanet, retina_loss, predict_box
FILE_TRAIN_IMAGES = "data/stage_1_train_images.zip"
FILE_TRAIN_LABELS = "data/stage_1_train_labels.zip"
FILE_TEST_IMAGES = "data/stage_1_test_images.zip"
TMP_DATASET_DIR = "/tmp/rsna_dataset"
TMP_DIR_TRAIN = "train"
TMP_DIR_TEST = "test"
EPOCHS = 1
BATCH_SIZE = 40
def extract_data(data_filename, dir="data"):
"""Read an archive containing challenge's DICOM images and extract in target directory
Returns the directory to which the files were extracted
:rtype String
"""
target_dir = os.path.join(TMP_DATASET_DIR, dir)
with ZipFile(data_filename) as dataset:
dataset.extractall(path=target_dir)
print("INFO: RSNA dataset extracted to tmp dir ", target_dir)
return target_dir
def read_images(target_dir):
"""Read a directory containing challenge's DICOM images and yield the image's name and pixels.
:rtype generator
"""
for file_name in os.listdir(target_dir):
full_name = os.path.join(target_dir, file_name)
with pydicom.dcmread(full_name) as dcm:
id = file_name.replace(".dcm", "")
yield (f"{id}", dcm.pixel_array)
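# Note: the placeholders and reshape calls in main() below assume the DICOM pixel
# arrays decode to 1024x1024 single-channel images, hence the (-1, 1024, 1024, 1) shapes.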
def generate_training_data(images, labels):
for (patientId, pixels) in images:
patient_entries = labels.loc[labels['patientId'] == patientId]
# print("patient_entries: ", patient_entries)
for index, entry in patient_entries.iterrows():
# print("entry: ", entry)
x = entry['x']
y = entry['y']
width = entry['width']
height = entry['height']
label = entry['Target']
yield (pixels, (x, y, width, height, label))
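# Hedged usage sketch (the CSV name inside FILE_TRAIN_LABELS is an assumption; the labels
# frame is expected to carry patientId, x, y, width, height and Target columns, as read above):
#   labels = pd.read_csv("stage_1_train_labels.csv")  # hypothetical file name
#   samples = generate_training_data(read_images(extract_data(FILE_TRAIN_IMAGES, TMP_DIR_TRAIN)), labels)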
def generate_kaggle_test_data(images):
for (patientId, pixels) in images:
yield (patientId, pixels)
# TODO consider image augmentation as in the ResNet paper section 3.4 Implementation
# TODO use weight decay of 0.0001 and momentum of 0.9
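# Hedged sketch for the TODOs above (not wired into main): SGD with momentum 0.9 plus an
# explicit L2 penalty of 1e-4 could replace the Adam optimizer used below, e.g.:
#   optimizer = tf.train.MomentumOptimizer(learning_rate=0.08, momentum=0.9)
#   l2_penalty = 0.0001 * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
#   minimize = optimizer.minimize(loss + l2_penalty)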
def main(argv):
train_dataset = make_train_dataset()
iterator = train_dataset.make_initializable_iterator()
iterator_init_op = iterator.initializer
next_batch = iterator.get_next()
pixels = tf.placeholder(dtype=tf.float32, shape=[None, 1024, 1024, 1])
target_class = tf.placeholder(dtype=tf.uint8, shape=[None, 1], name="classs")
target_boxes = tf.placeholder(dtype=tf.float32, shape=[None, 4], name="anchor_box")
inputs = pixels
inputs = tf.image.grayscale_to_rgb(inputs)
outputs = retinanet(inputs, 1)
loss = retina_loss(outputs, target_class, target_boxes)
optimizer = tf.train.AdamOptimizer(learning_rate=0.08)
minimize = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter("/tmp/rsna_model", sess.graph)
writer.close()
for epoch in range(EPOCHS):
print(f"EPOCH_{epoch}: ")
sess.run(iterator_init_op)
batch_index = 1
while True:
try:
batch = sess.run(next_batch)
(x, y, width, height, label) = batch[1]
batch_pixels = np.array(batch[0]).reshape(-1, 1024, 1024, 1)
batch_classes = np.array(label).reshape(-1, 1)
batch_boxes = np.array([x, y, width, height]).reshape(-1, 4)
sess.run(minimize, feed_dict={pixels: batch_pixels,
target_boxes: batch_boxes,
target_class: batch_classes})
print(f"{epoch} -> {batch_index}")
batch_index += 1
# break # TODO TMP
except tf.errors.OutOfRangeError:
print(f"Out of range with count: {batch_index}")
break
kaggle_test_dataset = make_kaggle_dataset()
kaggle_iterator = kaggle_test_dataset.make_initializable_iterator()
kaggle_iterator_init_op = kaggle_iterator.initializer
next_batch = kaggle_iterator.get_next()
kaggle_predictions = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(kaggle_iterator_init_op)
batch_index = 1
while True:
try:
print(f"Processing batch {batch_index}...")
batch = sess.run(next_batch)
patient_id = batch[0]
batch_pixels = np.array(batch[1]).reshape(-1, 1024, 1024, 1)
probability, box = sess.run(predict_box(outputs), feed_dict={pixels: batch_pixels})
df_prob = pd.DataFrame(probability, columns=['probability'])
df_box = pd.DataFrame(box, columns=['x', 'y', 'width', 'height'])
df_prediction = pd.concat([df_prob, df_box], axis=1)
df = pd.DataFrame(patient_id, columns=['patientId'])
df['PredictionString'] = df_prediction.iloc[:, :].apply(lambda x: " ".join(x.map('{:.4f}'.format)), axis=1)
kaggle_predictions.append(df)
total_processed = sess.run(batch_index * tf.shape(patient_id)[0])
print(f"Total test images processed: {total_processed}")
batch_index += 1
# break # TODO TMP
except tf.errors.OutOfRangeError:
break
    df = pd.concat(kaggle_predictions)
"""SQL io tests
The SQL tests are broken down into different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
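        # pandas calls a user-supplied ``method`` with (pd_table, conn, keys, data_iter):
        # ``pd_table`` is the SQLTable being written, ``conn`` the connection/engine,
        # ``keys`` the column names and ``data_iter`` an iterable of row tuples.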
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
    From this, two classes are derived to run these tests for both the
    sqlalchemy mode (`TestSQLApi`) and the fallback mode
    (`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
    tests for the different SQL flavors are included in `_TestSQLAlchemy`.
    Notes:
    - `flavor` can always be passed, even in SQLAlchemy mode; it should be
      correctly ignored.
    - we don't use `drop_table` because that isn't part of the public API.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
    deviate for each flavor are overridden there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
        # result index will gain a name from the set_index operation; the
        # expected index has no name, so skip the name check
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information; sqlite has no native datetime
        # type, so values come back as strings
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information; sqlite has no native datetime
        # type, so values come back as strings
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
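    # helper: list each index's column names for a table via SQLAlchemy reflection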
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
        # Bool column with NA behaves like an int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in
        # mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
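    # helper: look up a column's declared type from sqlite's PRAGMA table_info output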
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": | Series([True, None]) | pandas.Series |
"""
test_subcomp_b.py
Contains tests for subcomp_b_process_emissions_factors,
which averages emissions factors for DR days and all days.
"""
import unittest
import pandas as pd
import pandas.testing as pdt
from subcomp_b_process_emissions_factors import seasonal_ave, annual_ave, \
get_hour_ave, alldays_oneyear_seasonal_ave, get_oneyear_hour_ave, subcomp_b_runall
from emissions_parameters import DIR_TESTDATA_IN
df_emissions_data = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/emissions_data.xlsx')
df_dr_hours_winter = pd.read_excel(DIR_TESTDATA_IN+'subcomp_b_test_data/dr_hours_winter.xlsx')
import re
import pandas as pd
import numpy as np
from functools import partial
from tqdm import tqdm
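# Drop users that occur only once in the interaction log.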
def drop_single_action_users(df):
user_id_count = df.user_id.value_counts().reset_index(name="count").rename(columns={'index': 'user_id'})
df = df[~df['user_id'].isin(user_id_count[(user_id_count['count'] == 1)]['user_id'])]
return df
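# Drop sessions that contain a single action (when remove_single is True) or more than upper_limit actions.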
def drop_single_and_excessive_step_sessions(df,remove_single,upper_limit):
session_action_count = df.groupby(['session_id'])['action_type'].count().reset_index(name="count")
df = df[~df['session_id'].isin(session_action_count[(session_action_count['count'] == (1 if remove_single else 0)) | (session_action_count['count'] > upper_limit)]['session_id'])]
return df
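# Keep only the last occurrence of each (session_id, step) pair.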
def drop_duplicate_steps_in_session(df):
df = df.reset_index().drop_duplicates(subset=['session_id','step'],keep='last').set_index('index')
return df
def drop(path):
functions = [
drop_single_action_users,
partial(drop_single_and_excessive_step_sessions, remove_single=True, upper_limit=100),
drop_duplicate_steps_in_session
];
    df = pd.read_csv(path, sep=",")
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-poster')
# use LaTeX fonts in the plot
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
XLABEL = r'\textbf{Attempt Number}'
YLABEL = r'\textbf{Score}'
REQUIRED_SCORE = 200
##############################################################################
def plot_training_scores():
df = pd.read_csv('../results/train_scores.csv')
df['attempt_num'] = range(1, len(df) + 1)
print(df.head())
fig = plt.figure()
ax = plt.axes()
ax.plot(df['attempt_num'], df['score'], color='tab:blue')
# Draw a red line across the required score
ax.axhline(y=REQUIRED_SCORE, color='tab:red', linestyle='-')
# Draw a green line where the agent first gets the required score
# Returns the attempt number (+ 1 because we're one based)
first_successful_landing = df[df['score'].gt(199.99)].index[0] + 1
print(f"First Successful Landing is attempt # {first_successful_landing}")
ax.axvline(x=first_successful_landing, color='tab:green', linestyle='--')
plt.xlim((1, df['attempt_num'].max()))
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
plt.title(r'\textbf{Learning Results Over Attempts}')
plt.savefig('../results/train_scores.pdf')
plt.show()
def plot_MA_train_scores():
    df = pd.read_csv('../results/train_scores.csv')
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 8 08:56:36 2016
@author: davidangeles
"""
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tissue_enrichment_analysis as tea
import os
import mpl_toolkits.mplot3d
import pyrnaseq_graphics as rsq
from sklearn.preprocessing import StandardScaler
# Package to perform PCA
import sklearn.datasets
import sklearn.decomposition
sns.set_context("notebook")
mag = 2 # value of beta from regression
qval = .1 # qvalue from regression
qvalEn = 0.05 # q value for enrichment analysis (tissues)
dirLists = '../output/Gene_lists_for_analysis'
if not os.path.exists(dirLists):
os.makedirs(dirLists)
dirGraphs = '../output/Graphs'
if not os.path.exists(dirGraphs):
os.makedirs(dirGraphs)
os.chdir('./')
# gene_lists from sleuth
# tpm vals for PCA
dfTPM = pd.read_csv("../input/tpm_table.csv")
dfTPM.dropna(inplace=True)
# pos beta means high old adults
dfBetaA = pd.read_csv("../input/agebeta_wt.csv")
import pandas as pd
import numpy as np
import nltk
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import RegexpTokenizer
##### Read data for 5 sample positions ###########
req_ids = ["e3625ad", "39ee3f", "45de815"
,"40a2c38", "63146c6"]
# read raw text data for embeddings
job_text = pd.read_csv("data/cleaned_job.csv", index_col=0)
resume_text = pd.read_csv("data/cleaned_resume.csv", index_col=0)
# read structured
job_features = pd.read_csv("Resume-Parser-JOBS/data/output/job_description_summary.csv")
resume_features = pd.read_csv("data/resumes_5jobs.csv")
# keep only the relevant positions and candidates
job_text = job_text[job_text["Req ID"].isin(req_ids)]
resume_text = resume_text[resume_text["Req ID"].isin(req_ids)]
job_features = job_features[job_features.ReqID.isin(req_ids)]
resume_features = resume_features[resume_features.ReqID.isin(req_ids)]
##### one hot encode the structured features #####
### for jobs ###
# drop unused columns
drop_cols = ['GPA', 'courses', 'hobbies', 'email'
, 'phone','Education', 'Extracurriculars'
,'Language', 'Work', 'Summaries', 'Skill'
, 'Member', 'Writing', 'Researching'
, 'Honor', 'Activity']
job_features.drop(drop_cols, inplace=True, axis=1)
df = job_features
hot = df[['ReqID']]
#honor_societies
df.honor_societies.fillna('', inplace=True)
hot['HonorSociety'] = df.honor_societies.apply(lambda x: 1 if len(x) > 2 else 0)
#latin_honors
df.latin_honors.fillna('', inplace=True)
hot['LatinHonors'] = df.latin_honors.apply(lambda x: 1 if len(x) > 2 else 0)
#scholarships_awards
df.scholarships_awards.fillna('', inplace=True)
hot['ScholarshipsAward'] = df.scholarships_awards.apply(lambda x: 1 if len(x) > 2 else 0)
#schools
df.community_college.fillna('', inplace=True)
hot['CommCollege'] = df.community_college.apply(lambda x: 1 if len(x) > 2 else 0)
df.other_universities.fillna('', inplace=True)
hot['OtherUni'] = df.other_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_100_universities.fillna('', inplace=True)
hot['Top100Uni'] = df.top_100_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_10_universities.fillna('', inplace=True)
hot['Top10Uni'] = df.top_10_universities.apply(lambda x: 1 if len(x) > 2 else 0)
#degrees
df.associate_education_level.fillna('', inplace=True)
hot['Associates'] = df.associate_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.bachelor_education_level.fillna('', inplace=True)
hot['Bachelors'] = df.bachelor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.master_education_level.fillna('', inplace=True)
hot['Masters'] = df.master_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.doctor_education_level.fillna('', inplace=True)
hot['Doctors'] = df.doctor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
#companies
df.company_foodbev.fillna('', inplace=True)
hot['FoodBev'] = df.company_foodbev.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_consumer.fillna('', inplace=True)
hot['Consumer'] = df.company_consumer.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_energychem.fillna('', inplace=True)
hot['EnergyChem'] = df.company_energychem.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_fin.fillna('', inplace=True)
hot['Fin'] = df.company_fin.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_health.fillna('', inplace=True)
hot['HealthMed'] = df.company_health.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_industrial.fillna('', inplace=True)
hot['Industrial'] = df.company_industrial.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_tech.fillna('', inplace=True)
hot['Tech'] = df.company_tech.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_services.fillna('', inplace=True)
hot['Services'] = df.company_services.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_other.fillna('', inplace=True)
hot['OtherCo'] = df.company_other.apply(lambda x: 1 if len(x) > 2 else 0)
# ONE HOT ENCODING - EXPLODING COLUMNS
import yaml
with open('Resume-Parser-master-new/confs/config.yaml', 'r') as stream:
yaml_file = yaml.safe_load(stream)
#certifications
df.certifications.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['certifications']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.certifications.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#soft_skills
df.soft_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['soft_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.soft_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#major_minor
df.major_minor.fillna('', inplace=True)
for item in yaml_file['case_agnostic_education']['major_minor']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.major_minor.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#languages
df.languages.fillna('', inplace=True)
for item in yaml_file['case_agnostic_languages']['languages']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.languages.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#technical_skills
df.technical_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_skill']['technical_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.technical_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
job_dummies = hot
### for resumes ###
# drop unused columns
drop_cols = ['GPA', 'courses', 'hobbies', 'email', 'phone'
,'Education', 'Extracurriculars','Language', 'Work'
, 'Summaries', 'Skill', 'Member', 'Writing', 'Researching'
, 'Honor', 'Activity']
resume_features.drop(drop_cols, inplace=True, axis=1)
df = resume_features
#ONE HOT ENCODING
hot = df[['ReqID', 'CanID']]
#honor_societies
df.honor_societies.fillna('', inplace=True)
hot['HonorSociety'] = df.honor_societies.apply(lambda x: 1 if len(x) > 2 else 0)
#latin_honors
df.latin_honors.fillna('', inplace=True)
hot['LatinHonors'] = df.latin_honors.apply(lambda x: 1 if len(x) > 2 else 0)
#scholarships_awards
df.scholarships_awards.fillna('', inplace=True)
hot['ScholarshipsAward'] = df.scholarships_awards.apply(lambda x: 1 if len(x) > 2 else 0)
#schools
df.community_college.fillna('', inplace=True)
hot['CommCollege'] = df.community_college.apply(lambda x: 1 if len(x) > 2 else 0)
df.other_universities.fillna('', inplace=True)
hot['OtherUni'] = df.other_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_100_universities.fillna('', inplace=True)
hot['Top100Uni'] = df.top_100_universities.apply(lambda x: 1 if len(x) > 2 else 0)
df.top_10_universities.fillna('', inplace=True)
hot['Top10Uni'] = df.top_10_universities.apply(lambda x: 1 if len(x) > 2 else 0)
#degrees
df.associate_education_level.fillna('', inplace=True)
hot['Associates'] = df.associate_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.bachelor_education_level.fillna('', inplace=True)
hot['Bachelors'] = df.bachelor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.master_education_level.fillna('', inplace=True)
hot['Masters'] = df.master_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
df.doctor_education_level.fillna('', inplace=True)
hot['Doctors'] = df.doctor_education_level.apply(lambda x: 1 if len(x) > 2 else 0)
#companies
df.company_foodbev.fillna('', inplace=True)
hot['FoodBev'] = df.company_foodbev.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_consumer.fillna('', inplace=True)
hot['Consumer'] = df.company_consumer.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_energychem.fillna('', inplace=True)
hot['EnergyChem'] = df.company_energychem.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_fin.fillna('', inplace=True)
hot['Fin'] = df.company_fin.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_health.fillna('', inplace=True)
hot['HealthMed'] = df.company_health.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_industrial.fillna('', inplace=True)
hot['Industrial'] = df.company_industrial.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_tech.fillna('', inplace=True)
hot['Tech'] = df.company_tech.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_services.fillna('', inplace=True)
hot['Services'] = df.company_services.apply(lambda x: 1 if len(x) > 2 else 0)
df.company_other.fillna('', inplace=True)
hot['OtherCo'] = df.company_other.apply(lambda x: 1 if len(x) > 2 else 0)
#ONE HOT ENCODING - EXPLODING COLUMNS
with open('Resume-Parser-master-new/confs/config.yaml', 'r') as stream:
yaml_file = yaml.safe_load(stream)
#certifications
df.certifications.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['certifications']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.certifications.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#soft_skills
df.soft_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_whole_resume']['soft_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.soft_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#major_minor
df.major_minor.fillna('', inplace=True)
for item in yaml_file['case_agnostic_education']['major_minor']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.major_minor.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#languages
df.languages.fillna('', inplace=True)
for item in yaml_file['case_agnostic_languages']['languages']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.languages.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
#technical_skills
df.technical_skills.fillna('', inplace=True)
for item in yaml_file['case_agnostic_skill']['technical_skills']:
if type(item) == list:
search_term = item[0].replace('\\x20','').replace(' ','')
col_name = item[1].replace('\\x20','').replace(' ','')
else:
search_term = item.replace('\\x20','').replace(' ','')
col_name = search_term
hot[col_name] = df.technical_skills.apply(lambda x: 1 if x.find(search_term) >= 0 else 0)
empty_cols = []
for i in hot.columns[2:]:
if sum(hot[i]) == 0:
empty_cols.append(i)
resume_dummies = hot
##### text embedding (Count) #####
### helper: build a count (bag-of-words) embedding for one requisition's job description and its resumes
def GenerateCountEmbedding(req_id, job_text_df, resume_text_df):
    pos_jd_text = job_text_df[job_text_df["Req ID"] == req_id]
    pos_resume_text = resume_text_df[resume_text_df["Req ID"] == req_id]
pos_jd_text.rename(columns = {'Req ID':'ID',
'Job Description':'text'}, inplace=True)
pos_jd_text.ID = req_id
pos_jd_text = pos_jd_text[['ID', 'text']]
pos_resume_text.rename(columns = {'Candidate ID':'ID',
'Resume Text':'text'}, inplace=True)
pos_resume_text = pos_resume_text[['ID', 'text']]
#append to same df
df = pos_jd_text.append(pos_resume_text)
df.set_index('ID', inplace=True)
# join words and vectorize
tokenizer = RegexpTokenizer(r'\w+')
df['text'] = df['text'].apply(lambda x: tokenizer.tokenize(x))
df['text'] = df['text'].apply(lambda x: ' '.join(x))
count = CountVectorizer()
pos_embedding = count.fit_transform(df['text'])
pos_embedding = pd.DataFrame(pos_embedding.toarray())
pos_embedding.insert(loc=0, column="ID", value=df.index)
return pos_embedding
### for position e3625ad
pos1_embedding = GenerateCountEmbedding("e3625ad", job_text
, resume_text)
### for position "39ee3f"
pos2_embedding = GenerateCountEmbedding("39ee3f", job_text
, resume_text)
### for position "45de815"
pos3_embedding = GenerateCountEmbedding("45de815", job_text
, resume_text)
### for position "40a2c38"
pos4_embedding = GenerateCountEmbedding("40a2c38", job_text
, resume_text)
### for position "63146c6"
pos5_embedding = GenerateCountEmbedding("63146c6", job_text
, resume_text)
##### embeddings TFIDF #####
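# Same pipeline as the count embedding above, but with TF-IDF weighting: term counts are
# scaled down for words that appear in many documents, so boilerplate shared across
# resumes contributes less to the cosine similarity.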
def GenerateTfidfEmbedding(req_id, job_text_df, resume_text_df):
    pos_jd_text = job_text_df[job_text_df["Req ID"] == req_id]
    pos_resume_text = resume_text_df[resume_text_df["Req ID"] == req_id]
pos_jd_text.rename(columns = {'Req ID':'ID',
'Job Description':'text'}, inplace=True)
pos_jd_text.ID = req_id
pos_jd_text = pos_jd_text[['ID', 'text']]
pos_resume_text.rename(columns = {'Candidate ID':'ID',
'Resume Text':'text'}, inplace=True)
pos_resume_text = pos_resume_text[['ID', 'text']]
#append to same df
df = pos_jd_text.append(pos_resume_text)
df.set_index('ID', inplace=True)
# join words and vectorize
tokenizer = RegexpTokenizer(r'\w+')
df['text'] = df['text'].apply(lambda x: tokenizer.tokenize(x))
df['text'] = df['text'].apply(lambda x: ' '.join(x))
tfidf = TfidfVectorizer()
tfidf_embedding = tfidf.fit_transform(df['text'])
tfidf_embedding = pd.DataFrame(tfidf_embedding.toarray())
tfidf_embedding.insert(loc=0, column="ID", value=df.index)
return tfidf_embedding
### for position "e3625ad"
pos1_tfidf = GenerateTfidfEmbedding("e3625ad", job_text
, resume_text)
### for position "39ee3f"
pos2_tfidf = GenerateTfidfEmbedding("39ee3f", job_text
, resume_text)
### for position "45de815"
pos3_tfidf = GenerateTfidfEmbedding("45de815", job_text
, resume_text)
### for position "40a2c38"
pos4_tfidf = GenerateTfidfEmbedding("40a2c38", job_text
, resume_text)
### for position "63146c6"
pos5_tfidf = GenerateTfidfEmbedding("63146c6", job_text
, resume_text)
##### combining embedding with dummies ########
# list(set(list(resume_dummies.columns))-set(list(job_dummies.columns)))
#rename their index column
resume_dummies.rename(columns = {'CanID':'ID'}, inplace=True)
resume_dummies.drop(["ReqID"], inplace=True, axis=1)
job_dummies.rename(columns = {'ReqID':'ID'}, inplace=True)
all_dummies = pd.concat([resume_dummies, job_dummies])
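# one dummy-feature frame covering both resumes (keyed by candidate ID) and jobs (keyed by
# requisition ID), so it can be merged onto either kind of embedding by "ID"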
### Combine with Count embedding ###
pos1_full_count = pd.DataFrame(pos1_embedding).merge(all_dummies
, how="left"
, on="ID")
pos1_full_count.drop_duplicates(subset="ID", inplace=True)
pos1_full_count = pos1_full_count.fillna(value=0)
pos2_full_count = pd.DataFrame(pos2_embedding).merge(all_dummies
, how="left"
, on="ID")
pos2_full_count.drop_duplicates(subset="ID", inplace=True)
pos2_full_count = pos2_full_count.fillna(value=0)
pos3_full_count = pd.DataFrame(pos3_embedding).merge(all_dummies
, how="left"
, on="ID")
pos3_full_count.drop_duplicates(subset="ID", inplace=True)
pos3_full_count = pos3_full_count.fillna(value=0)
pos4_full_count = pd.DataFrame(pos4_embedding).merge(all_dummies
, how="left"
, on="ID")
pos4_full_count.drop_duplicates(subset="ID", inplace=True)
pos4_full_count = pos4_full_count.fillna(value=0)
pos5_full_count = pd.DataFrame(pos5_embedding).merge(all_dummies
, how="left"
, on="ID")
pos5_full_count.drop_duplicates(subset="ID", inplace=True)
pos5_full_count = pos5_full_count.fillna(value=0)
### Combine with TFIDF embedding ###
pos1_full_tfidf = pos1_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos1_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos1_full_tfidf = pos1_full_tfidf.fillna(value=0)
pos2_full_tfidf = pos2_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos2_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos2_full_tfidf = pos2_full_tfidf.fillna(value=0)
pos3_full_tfidf = pos3_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos3_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos3_full_tfidf = pos3_full_tfidf.fillna(value=0)
pos4_full_tfidf = pos4_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos4_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos4_full_tfidf = pos4_full_tfidf.fillna(value=0)
pos5_full_tfidf = pos5_tfidf.merge(all_dummies
, how="left"
, on="ID")
pos5_full_tfidf.drop_duplicates(subset="ID", inplace=True)
pos5_full_tfidf = pos5_full_tfidf.fillna(value=0)
##### Run Cos Sim and rank the candidates #####
# define a function that returns the top-ten recommended resume IDs for a given job description
def RecommendTopTen(jobID, full_df):
recommended_candidates = []
indices = pd.Series(full_df["ID"])
cos_sim = cosine_similarity(full_df.drop("ID", axis=1)
, full_df.drop("ID", axis=1))
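    # cos_sim[i, j] is the cosine similarity between rows i and j; the job description
    # and all resumes for this requisition live in the same matrix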
idx = indices[indices == jobID].index[0]
score_series = pd.Series(cos_sim[idx]).sort_values(ascending=False)
top_10 = list(score_series.iloc[1:11].index)
for i in top_10:
recommended_candidates.append(list(indices)[i])
return recommended_candidates
#### position 1 with count vectors #####
ordered_candidate_list_pos1 = RecommendTopTen(jobID='e3625ad'
, full_df=pos1_full_count)
df_list = []
for i in ordered_candidate_list_pos1:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "e3625ad"]
pd.concat(df_list)
#### position 1 with tfidf vectors #####
ordered_candidate_list_pos1 = RecommendTopTen(jobID='e3625ad'
, full_df=pos1_full_tfidf)
df_list = []
for i in ordered_candidate_list_pos1:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "e3625ad"]
pd.concat(df_list)
#### position 2 with count vectors #####
ordered_candidate_list_pos2 = RecommendTopTen(jobID='39ee3f'
, full_df=pos2_full_count)
df_list = []
for i in ordered_candidate_list_pos2:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "39ee3f"]
pd.concat(df_list)
#### position 2 with tfidf vectors #####
ordered_candidate_list_pos2 = RecommendTopTen(jobID='39ee3f'
, full_df=pos2_full_tfidf)
df_list = []
for i in ordered_candidate_list_pos2:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "39ee3f"]
pd.concat(df_list)
#### position 3 with count vectors #####
ordered_candidate_list_pos3 = RecommendTopTen(jobID='45de815'
, full_df=pos3_full_count)
df_list = []
for i in ordered_candidate_list_pos3:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "45de815"]
pd.concat(df_list)
#### position 3 with tfidf vectors #####
ordered_candidate_list_pos3 = RecommendTopTen(jobID='45de815'
, full_df=pos3_full_tfidf)
df_list = []
for i in ordered_candidate_list_pos3:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "45de815"]
pd.concat(df_list)
#### position 4 with count vectors #####
ordered_candidate_list_pos4 = RecommendTopTen(jobID='40a2c38'
, full_df=pos4_full_count)
df_list = []
for i in ordered_candidate_list_pos4:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "40a2c38"]
pd.concat(df_list)
#### position 4 with tfidf vectors #####
ordered_candidate_list_pos4 = RecommendTopTen(jobID='40a2c38'
, full_df=pos4_full_tfidf)
df_list = []
for i in ordered_candidate_list_pos4:
df_list.append(resume_text.loc[resume_text['Candidate ID'] == i])
# show result
job_text.loc[job_text["Req ID"] == "40a2c38"]
pd.concat(df_list)
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# !wget http://156.253.5.172/hotels.zip
# !unzip -q hotels.zip
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
# pd.read_csv("train.csv").sample(frac=1).to_csv("train_random.csv", index=False)
# In[3]:
# df_train = pd.read_csv("hotels/train.csv")
df_test = pd.read_csv("test.csv")
# In[4]:
df_test.head()
# In[290]:
df_test["user"].unique().shape
# In[367]:
features_columns = ["channel", "is_mobile", "is_package", "n_rooms", "n_adults", "n_children", "hotel_category", "search_date", "checkIn_date", "checkOut_date"]
# In[427]:
channels = df_test["channel"].unique()
# destinations = df_test["destination"].unique()
cats = df_test["hotel_category"].unique()
# diffs = ["diff_{}".format(i) for i in range(10)]
data_columns = list(channels) + list(cats) + ["is_mobile", "is_package", "n_adults", "n_children", "n_rooms", "diff", "diff2", "weekday1", "weekday2", "weekday3", "month1", "month2", "month3", "quartet1", "quartet2", "quartet3", "single", "has_child", "rich", "common_hotels"]
data_columns_set = set(data_columns)
# In[426]:
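# Feature engineering applied to each chunk: booking lead time (check-in minus search date),
# length of stay, weekday/month/quarter of each date, and simple flags (common hotel
# categories, no adults, has children, multiple rooms).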
def add_new_columns(_X):
checkIn_date = pd.to_datetime(_X["checkIn_date"])
checkOut_date = pd.to_datetime(_X["checkOut_date"])
search_date = pd.to_datetime(_X["search_date"])
_X["common_hotels"] = _X['hotel_category'].apply(lambda x: x in ["g7", "g13", "g15", "g32", "g19", "g43", "g6", "g49", "g16"])
_X["diff"] = (checkIn_date - search_date).dt.days.fillna(0).astype(int)
_X["diff2"] = (checkOut_date - checkIn_date).dt.days.fillna(0).astype(int)
_X["weekday1"] = checkIn_date.dt.weekday.fillna(0)
_X["weekday2"] = checkOut_date.dt.weekday.fillna(0)
_X["weekday3"] = search_date.dt.weekday.fillna(0)
_X["month1"] = checkIn_date.dt.month.fillna(0)
_X["month2"] = checkOut_date.dt.month.fillna(0)
_X["month3"] = search_date.dt.month.fillna(0)
_X["quartet1"] = checkIn_date.dt.quarter.fillna(0)
_X["quartet2"] = checkOut_date.dt.quarter.fillna(0)
_X["quartet3"] = search_date.dt.quarter.fillna(0)
_X["single"] = _X['n_adults'].apply(lambda x: x == 0)
_X["has_child"] = _X['n_children'].apply(lambda x: x > 0)
_X["rich"] = _X['n_rooms'].apply(lambda x: x > 1)
del _X["checkIn_date"]
del _X["checkOut_date"]
del _X["search_date"]
# In[399]:
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.linear_model import SGDClassifier, PassiveAggressiveClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.neural_network import MLPClassifier
import xgboost as xgb
# model = BernoulliNB()
model = MultinomialNB()
# model = RandomForestClassifier()
# model = SGDClassifier(loss="modified_huber")
# model = DecisionTreeClassifier()
# model = LatentDirichletAllocation(n_components=2)
# model = MLPClassifier(verbose=True, max_iter=5)
# model = PassiveAggressiveClassifier()
# In[428]:
i = 0
for train_test_df in pd.read_csv("train_random.csv", chunksize=1024 * 8, iterator=True):
if i <= 80:
i += 1
continue
X_test = train_test_df[features_columns]
add_new_columns(X_test)
y_test = train_test_df["is_booking"]
X_test = pd.get_dummies(X_test, columns=["channel", "hotel_category"])
columns_set = set(X_test.columns)
to_be_added = list(data_columns_set - columns_set)
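    # pad the chunk with all-zero columns for any dummy features it lacks, so every chunk
    # ends up with the full data_columns schema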
X_test = pd.concat(
[
X_test,
pd.DataFrame(
[[0 for i in range(len(to_be_added))]],
index=X_test.index,
columns=to_be_added
)
], axis=1
)
xg_test = xgb.DMatrix(X_test[data_columns], label=y_test)
break
# In[ ]:
# In[434]:
from imblearn.datasets import make_imbalance
from imblearn.over_sampling import SMOTE
import warnings
warnings.filterwarnings('ignore')
single = True
xg = True
for epoch in range(1 if single else 5):
i = 0
    for train_df in pd.read_csv("train_random.csv", chunksize=1024 * 64, iterator=True):
# pylint: disable=too-many-lines
import argparse
import calendar
import datetime
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import annofabapi
import more_itertools
import numpy
import pandas
from annofabapi.models import OrganizationMember, Project, ProjectMember
from annofabapi.utils import allow_404_error
from dataclasses_json import DataClassJsonMixin
from more_itertools import first_true
import annofabcli
from annofabcli import AnnofabApiFacade
from annofabcli.common.cli import AbstractCommandLineInterface, build_annofabapi_resource_and_login, get_list_from_args
from annofabcli.common.utils import _catch_exception, isoduration_to_hour
logger = logging.getLogger(__name__)
def _create_required_columns(df: pandas.DataFrame, prior_columns: List[Any]) -> List[str]:
remained_columns = list(df.columns.difference(prior_columns))
all_columns = prior_columns + remained_columns
return all_columns
@dataclass(frozen=True)
class User(DataClassJsonMixin):
account_id: str
user_id: str
username: str
biography: Optional[str]
@dataclass(frozen=True)
class LaborWorktime(DataClassJsonMixin):
"""
    Labor management (worktime) information.
"""
date: str
organization_id: str
organization_name: str
project_id: str
project_title: str
account_id: str
user_id: str
username: str
biography: Optional[str]
worktime_plan_hour: float
"""労務管理画面の予定作業時間"""
worktime_result_hour: float
"""労務管理画面の実績作業時間"""
worktime_monitored_hour: Optional[float]
"""AnnoFabの作業時間"""
working_description: Optional[str]
"""実績作業時間に対する備考"""
@dataclass(frozen=True)
class LaborAvailability(DataClassJsonMixin):
"""
    Labor management information (planned availability).
"""
date: str
account_id: str
user_id: str
username: str
availability_hour: float
@dataclass(frozen=True)
class SumLaborWorktime(DataClassJsonMixin):
"""
    Worktime information for output.
"""
date: str
user_id: str
worktime_plan_hour: float
worktime_result_hour: float
class ListWorktimeByUserMain:
def __init__(self, service: annofabapi.Resource):
self.service = service
self.facade = AnnofabApiFacade(service)
DATE_FORMAT = "%Y-%m-%d"
MONTH_FORMAT = "%Y-%m"
_dict_account_statistics: Dict[str, List[Dict[str, Any]]] = {}
"""project_idごとの統計情報dict"""
@allow_404_error
def _get_account_statistics(self, project_id) -> Optional[List[Any]]:
account_statistics = self.service.wrapper.get_account_statistics(project_id)
return account_statistics
def _get_worktime_monitored_hour_from_project_id(
self, project_id: str, account_id: str, date: str
) -> Optional[float]:
account_statistics = self._dict_account_statistics.get(project_id)
if account_statistics is None:
result = self._get_account_statistics(project_id)
if result is not None:
account_statistics = result
else:
logger.warning(f"project_id={project_id}: プロジェクトにアクセスできないため、アカウント統計情報を取得できませんでした。")
account_statistics = []
self._dict_account_statistics[project_id] = account_statistics
return self._get_worktime_monitored_hour(account_statistics, account_id=account_id, date=date)
@staticmethod
def _get_worktime_monitored_hour(
account_statistics: List[Dict[str, Any]], account_id: str, date: str
) -> Optional[float]:
"""
        Get the worktime monitored by AnnoFab.
"""
stat = first_true(account_statistics, pred=lambda e: e["account_id"] == account_id)
if stat is None:
return None
histories = stat["histories"]
hist = first_true(histories, pred=lambda e: e["date"] == date)
if hist is None:
return None
return isoduration_to_hour(hist["worktime"])
@staticmethod
def create_required_columns(df, prior_columns):
remained_columns = list(df.columns.difference(prior_columns))
all_columns = prior_columns + remained_columns
return all_columns
@staticmethod
def get_member_from_user_id(
organization_member_list: List[OrganizationMember], user_id: str
) -> Optional[OrganizationMember]:
member = more_itertools.first_true(organization_member_list, pred=lambda e: e["user_id"] == user_id)
return member
@staticmethod
def get_project_title(project_list: List[Project], project_id: str) -> str:
project = more_itertools.first_true(project_list, pred=lambda e: e["project_id"] == project_id)
if project is not None:
return project["title"]
else:
return ""
@staticmethod
def get_worktime_hour(working_time_by_user: Optional[Dict[str, Any]], key: str) -> float:
if working_time_by_user is None:
return 0
value = working_time_by_user.get(key)
if value is None:
return 0
else:
return value / 3600 / 1000
@staticmethod
def _get_working_description(working_time_by_user: Optional[Dict[str, Any]]) -> Optional[str]:
if working_time_by_user is None:
return None
return working_time_by_user.get("description")
@staticmethod
def _get_labor_worktime(
labor: Dict[str, Any],
member: Optional[ProjectMember],
project_title: str,
organization_name: str,
worktime_monitored_hour: Optional[float],
) -> LaborWorktime:
new_labor = LaborWorktime(
date=labor["date"],
organization_id=labor["organization_id"],
organization_name=organization_name,
project_id=labor["project_id"],
project_title=project_title,
account_id=labor["account_id"],
user_id=member["user_id"] if member is not None else labor["account_id"],
username=member["username"] if member is not None else labor["account_id"],
biography=member["biography"] if member is not None else None,
worktime_plan_hour=labor["plan_worktime"] if labor["plan_worktime"] is not None else 0,
worktime_result_hour=labor["actual_worktime"] if labor["actual_worktime"] is not None else 0,
working_description=labor["working_description"],
worktime_monitored_hour=worktime_monitored_hour,
)
return new_labor
def get_labor_availability_list_dict(
self,
user_list: List[User],
start_date: str,
end_date: str,
) -> Dict[str, List[LaborAvailability]]:
"""
Get the scheduled availability hours.
Args:
user_list:
start_date:
end_date:
Returns:
"""
labor_availability_dict = {}
for user in user_list:
# To get the scheduled availability hours, a special organization ID is passed
labor_list = self.service.wrapper.get_labor_control_availability(
from_date=start_date,
to_date=end_date,
account_id=user.account_id,
)
new_labor_list = []
for labor in labor_list:
new_labor = LaborAvailability(
date=labor["date"],
account_id=labor["account_id"],
user_id=user.user_id,
username=user.username,
availability_hour=labor["availability"] if labor["availability"] is not None else 0,
)
new_labor_list.append(new_labor)
labor_availability_dict[user.user_id] = new_labor_list
return labor_availability_dict
def get_labor_list_from_project_id(
self,
project_id: str,
account_id_list: Optional[List[str]],
start_date: Optional[str],
end_date: Optional[str],
add_monitored_worktime: bool = False,
) -> List[LaborWorktime]:
organization, _ = self.service.api.get_organization_of_project(project_id)
organization_name = organization["organization_name"]
if account_id_list is None:
logger.debug(f"project_id={project_id}の、すべての労務管理情報を取得しています。")
labor_list = self.service.wrapper.get_labor_control_worktime(
project_id=project_id,
organization_id=organization["organization_id"],
from_date=start_date,
to_date=end_date,
)
else:
labor_list = []
for account_id in account_id_list:
logger.debug(f"project_id={project_id}の、ユーザ{len(account_id_list)}件分の労務管理情報を取得しています。")
tmp_labor_list = self.service.wrapper.get_labor_control_worktime(
project_id=project_id, from_date=start_date, to_date=end_date, account_id=account_id
)
labor_list.extend(tmp_labor_list)
project_title = self.service.api.get_project(project_id)[0]["title"]
labor_list = [
e
for e in labor_list
if (e["actual_worktime"] is not None and e["actual_worktime"] > 0)
or (e["plan_worktime"] is not None and e["plan_worktime"] > 0)
]
logger.info(f"'{project_title}'プロジェクト('{project_id}')の労務管理情報の件数: {len(labor_list)}")
new_labor_list = []
for labor in labor_list:
# Skip data that is not tied to an individual account
if labor["account_id"] is None:
continue
member = self.facade.get_project_member_from_account_id(labor["project_id"], labor["account_id"])
if add_monitored_worktime:
try:
worktime_monitored_hour = self._get_worktime_monitored_hour_from_project_id(
project_id=project_id, account_id=labor["account_id"], date=labor["date"]
)
except Exception: # pylint: disable=broad-except
logger.warning(f"project_id={project_id}: 計測作業時間を取得できませんでした。", exc_info=True)
worktime_monitored_hour = None
else:
worktime_monitored_hour = None
new_labor = self._get_labor_worktime(
labor,
member=member,
project_title=project_title,
organization_name=organization_name,
worktime_monitored_hour=worktime_monitored_hour,
)
new_labor_list.append(new_labor)
return new_labor_list
def get_labor_list_from_organization_name(
self,
organization_name: str,
account_id_list: Optional[List[str]],
start_date: Optional[str],
end_date: Optional[str],
add_monitored_worktime: bool = False,
) -> List[LaborWorktime]:
organization, _ = self.service.api.get_organization(organization_name)
organization_id = organization["organization_id"]
if account_id_list is None:
logger.debug(f"organization_name={organization_name}の、すべての労務管理情報を取得しています。")
labor_list = self.service.wrapper.get_labor_control_worktime(
organization_id=organization_id, from_date=start_date, to_date=end_date
)
else:
labor_list = []
logger.debug(f"organization_name={organization_name}の、ユーザ{len(account_id_list)}件分の労務管理情報を取得しています。")
for account_id in account_id_list:
tmp_labor_list = self.service.wrapper.get_labor_control_worktime(
organization_id=organization_id, from_date=start_date, to_date=end_date, account_id=account_id
)
labor_list.extend(tmp_labor_list)
labor_list = [
e
for e in labor_list
if (e["actual_worktime"] is not None and e["actual_worktime"] > 0)
or (e["plan_worktime"] is not None and e["plan_worktime"] > 0)
]
logger.info(f"'{organization_name}'組織の労務管理情報の件数: {len(labor_list)}")
project_list = self.service.wrapper.get_all_projects_of_organization(organization_name)
new_labor_list = []
for labor in labor_list:
try:
member = self.facade.get_project_member_from_account_id(labor["project_id"], labor["account_id"])
except Exception: # pylint: disable=broad-except
logger.warning(f"project_id={labor['project_id']}: メンバ一覧を取得できませんでした。", exc_info=True)
member = None
project_title = self.get_project_title(project_list, labor["project_id"])
if add_monitored_worktime:
try:
worktime_monitored_hour = self._get_worktime_monitored_hour_from_project_id(
project_id=labor["project_id"], account_id=labor["account_id"], date=labor["date"]
)
except Exception: # pylint: disable=broad-except
logger.warning(f"project_id={labor['project_id']}: 計測作業時間を取得できませんでした。", exc_info=True)
worktime_monitored_hour = None
else:
worktime_monitored_hour = None
new_labor = self._get_labor_worktime(
labor,
member=member,
project_title=project_title,
organization_name=organization_name,
worktime_monitored_hour=worktime_monitored_hour,
)
new_labor_list.append(new_labor)
return new_labor_list
@staticmethod
def get_sum_worktime_list(
labor_list: List[LaborWorktime], user_id: str, start_date: str, end_date: str
) -> List[SumLaborWorktime]:
sum_labor_list = []
for date in pandas.date_range(start=start_date, end=end_date):
str_date = date.strftime(ListWorktimeByUserMain.DATE_FORMAT)
filtered_list = [e for e in labor_list if e.user_id == user_id and e.date == str_date]
worktime_plan_hour = sum([e.worktime_plan_hour for e in filtered_list])
worktime_result_hour = sum([e.worktime_result_hour for e in filtered_list])
labor = SumLaborWorktime(
user_id=user_id,
date=date,
worktime_plan_hour=worktime_plan_hour,
worktime_result_hour=worktime_result_hour,
)
sum_labor_list.append(labor)
return sum_labor_list
@staticmethod
@_catch_exception
def write_sum_worktime_list(sum_worktime_df: pandas.DataFrame, output_dir: Path):
sum_worktime_df.round(3).to_csv(str(output_dir / "ユーザごとの作業時間.csv"), encoding="utf_8_sig", index=False)
@staticmethod
@_catch_exception
def write_sum_plan_worktime_list(sum_worktime_df: pandas.DataFrame, output_dir: Path) -> None:
"""
To indicate whether someone is scheduled to work, output "×" when the planned work hours are 0 and "○" otherwise.
Args:
sum_worktime_df:
output_dir:
"""
def create_mark(value) -> str:
if value == 0:
return "×"
else:
return "○"
def is_plan_column(c) -> bool:
c1, c2 = c
if c1 in ["date", "dayofweek"]:
return False
return c2 == "作業予定"
username_list = [e[0] for e in sum_worktime_df.columns if is_plan_column(e)]
for username in username_list:
# Copy the values temporarily to avoid SettingWithCopyWarning
sum_worktime_df[(username, "作業予定_記号")] = sum_worktime_df[(username, "作業予定")].map(create_mark)
output_columns = [("date", ""), ("dayofweek", "")] + [(e, "作業予定_記号") for e in username_list]
sum_worktime_df[output_columns].to_csv(str(output_dir / "ユーザごとの作業予定_記号.csv"), encoding="utf_8_sig", index=False)
@staticmethod
@_catch_exception
def write_worktime_list(worktime_df: pandas.DataFrame, output_dir: Path, add_monitored_worktime: bool = False):
worktime_df = worktime_df.rename(
columns={
"worktime_plan_hour": "作業予定時間",
"worktime_result_hour": "作業実績時間",
"worktime_monitored_hour": "計測時間",
"working_description": "備考",
}
)
columns = [
"date",
"organization_name",
"project_title",
"project_id",
"username",
"biography",
"user_id",
"作業予定時間",
"作業実績時間",
"計測時間",
"備考",
]
if not add_monitored_worktime:
columns.remove("計測時間")
worktime_df[columns].round(3).to_csv(str(output_dir / "作業時間の詳細一覧.csv"), encoding="utf_8_sig", index=False)
@staticmethod
@_catch_exception
def write_worktime_per_user_date(worktime_df_per_date_user: pandas.DataFrame, output_dir: Path):
add_availability = "availability_hour" in worktime_df_per_date_user.columns
target_renamed_columns = {"worktime_plan_hour": "作業予定時間", "worktime_result_hour": "作業実績時間"}
if add_availability:
target_renamed_columns.update({"availability_hour": "予定稼働時間"})
df = worktime_df_per_date_user.rename(columns=target_renamed_columns)
columns = [
"date",
"user_id",
"username",
"biography",
"予定稼働時間",
"作業予定時間",
"作業実績時間",
]
if not add_availability:
columns.remove("予定稼働時間")
df[columns].round(3).to_csv(str(output_dir / "日ごとの作業時間の一覧.csv"), encoding="utf_8_sig", index=False)
@staticmethod
@_catch_exception
def write_worktime_per_user(worktime_df_per_user: pandas.DataFrame, output_dir: Path, add_availability: bool):
target_renamed_columns = {
"worktime_plan_hour": "作業予定時間",
"worktime_result_hour": "作業実績時間",
"result_working_days": "実績稼働日数",
}
if add_availability:
target_renamed_columns.update({"availability_hour": "予定稼働時間"})
target_renamed_columns.update({"availability_days": "予定稼働日数"})
df = worktime_df_per_user.rename(columns=target_renamed_columns)
columns = [
"user_id",
"username",
"biography",
"予定稼働時間",
"作業予定時間",
"作業実績時間",
"予定稼働日数",
"実績稼働日数",
]
if not add_availability:
columns.remove("予定稼働時間")
columns.remove("予定稼働日数")
df[columns].round(3).to_csv(str(output_dir / "summary.csv"), encoding="utf_8_sig", index=False)
def get_organization_member_list(
self, organization_name_list: Optional[List[str]], project_id_list: Optional[List[str]]
) -> List[OrganizationMember]:
member_list: List[OrganizationMember] = []
if project_id_list is not None:
tmp_organization_name_list = []
for project_id in project_id_list:
organization, _ = self.service.api.get_organization_of_project(project_id)
tmp_organization_name_list.append(organization["organization_name"])
organization_name_list = list(set(tmp_organization_name_list))
if organization_name_list is not None:
for organization_name in organization_name_list:
member_list.extend(self.service.wrapper.get_all_organization_members(organization_name))
return member_list
def _get_user_from_organization_name_list(self, organization_name_list: List[str], user_id: str) -> Optional[User]:
for organization_name in organization_name_list:
organization_member = self.service.wrapper.get_organization_member_or_none(organization_name, user_id)
if organization_member is not None:
return User(
user_id=organization_member["user_id"],
account_id=organization_member["account_id"],
username=organization_member["username"],
biography=organization_member["biography"],
)
return None
def _get_user_from_project_id_list(self, project_id_list: List[str], user_id: str) -> Optional[User]:
for project_id in project_id_list:
project_member = self.service.wrapper.get_project_member_or_none(project_id, user_id)
if project_member is not None:
return User(
user_id=project_member["user_id"],
account_id=project_member["account_id"],
username=project_member["username"],
biography=project_member["biography"],
)
return None
def get_user_list(
self,
labor_list: List[LaborWorktime],
organization_name_list: Optional[List[str]] = None,
project_id_list: Optional[List[str]] = None,
user_id_list: Optional[List[str]] = None,
) -> List[User]:
"""
Get the list of users to output to summary.csv.
Args:
labor_list:
organization_name_list:
project_id_list:
user_id_list:
Returns:
List of users
"""
df = (
pandas.DataFrame(labor_list, columns=["account_id", "user_id", "username", "biography"])
.drop_duplicates()
.set_index("user_id")
)
user_list: List[User] = []
if user_id_list is None:
for user_id, row in df.iterrows():
user_list.append(
User(
user_id=str(user_id),
account_id=row["account_id"],
username=row["username"],
biography=row["biography"],
)
)
return user_list
if organization_name_list is not None:
for user_id in user_id_list:
if user_id in df.index:
row = df.loc[user_id]
user_list.append(
User(
user_id=str(user_id),
account_id=row["account_id"],
username=row["username"],
biography=row["biography"],
)
)
continue
user = self._get_user_from_organization_name_list(organization_name_list, user_id)
if user is not None:
user_list.append(user)
else:
logger.warning(f"user_id={user_id} のユーザは、組織メンバに存在しませんでした。")
if project_id_list is not None:
for user_id in user_id_list:
if user_id in df.index:
row = df.loc[user_id]
user_list.append(
User(
user_id=str(user_id),
account_id=row["account_id"],
username=row["username"],
biography=row["biography"],
)
)
continue
user = self._get_user_from_project_id_list(project_id_list, user_id)
if user is not None:
user_list.append(user)
else:
logger.warning(f"user_id={user_id} のユーザは、プロジェクトメンバに存在しませんでした。")
return user_list
@staticmethod
def get_availability_list(
labor_availability_list: List[LaborAvailability],
start_date: str,
end_date: str,
) -> List[Optional[float]]:
def get_availability_hour(str_date: str) -> Optional[float]:
labor = more_itertools.first_true(labor_availability_list, pred=lambda e: e.date == str_date)
if labor is not None:
return labor.availability_hour
else:
return None
availability_list: List[Optional[float]] = []
for date in pandas.date_range(start=start_date, end=end_date):
str_date = date.strftime(ListWorktimeByUserMain.DATE_FORMAT)
availability_list.append(get_availability_hour(str_date))
return availability_list
def get_account_id_list_from_project_id(self, user_id_list: List[str], project_id_list: List[str]) -> List[str]:
"""
Get the account_id of each target user from the list of project_id.
Args:
user_id_list:
project_id_list:
Returns:
List of account_id
"""
account_id_list = []
not_exists_user_id_list = []
for user_id in user_id_list:
member_exists = False
for project_id in project_id_list:
member = self.facade.get_project_member_from_user_id(project_id, user_id)
if member is not None:
account_id_list.append(member["account_id"])
member_exists = True
break
if not member_exists:
not_exists_user_id_list.append(user_id)
if len(not_exists_user_id_list) == 0:
return account_id_list
else:
raise ValueError(f"以下のユーザは、指定されたプロジェクトのプロジェクトメンバではありませんでした。\n{not_exists_user_id_list}")
def get_account_id_list_from_organization_name(
self, user_id_list: List[str], organization_name_list: List[str]
) -> List[str]:
"""
Get the account_id of each target user from the list of organization names.
Args:
user_id_list:
organization_name_list:
Returns:
List of account_id
"""
def _get_account_id(fuser_id: str) -> Optional[str]:
# Get organization-member info for users who may have left the organization
for organization_name in organization_name_list:
member = self.service.wrapper.get_organization_member_or_none(organization_name, fuser_id)
if member is not None:
return member["account_id"]
return None
# List the organization members (members who have already left cannot be retrieved)
all_organization_member_list = []
for organization_name in organization_name_list:
all_organization_member_list.extend(self.service.wrapper.get_all_organization_members(organization_name))
user_id_dict = {e["user_id"]: e["account_id"] for e in all_organization_member_list}
account_id_list = []
not_exists_user_id_list = []
for user_id in user_id_list:
if user_id in user_id_dict:
account_id_list.append(user_id_dict[user_id])
else:
account_id = _get_account_id(user_id)
if account_id is not None:
account_id_list.append(account_id)
else:
not_exists_user_id_list.append(user_id)
if len(not_exists_user_id_list) == 0:
return account_id_list
else:
raise ValueError(f"以下のユーザは、指定された組織の組織メンバではありませんでした。\n{not_exists_user_id_list}")
def get_labor_list(
self,
organization_name_list: Optional[List[str]],
project_id_list: Optional[List[str]],
user_id_list: Optional[List[str]],
start_date: Optional[str],
end_date: Optional[str],
add_monitored_worktime: bool = False,
) -> List[LaborWorktime]:
labor_list: List[LaborWorktime] = []
account_id_list: Optional[List[str]] = None
logger.info(f"労務管理情報を取得します。")
if project_id_list is not None:
if user_id_list is not None:
account_id_list = self.get_account_id_list_from_project_id(
user_id_list, project_id_list=project_id_list
)
for project_id in project_id_list:
labor_list.extend(
self.get_labor_list_from_project_id(
project_id,
account_id_list=account_id_list,
start_date=start_date,
end_date=end_date,
add_monitored_worktime=add_monitored_worktime,
)
)
elif organization_name_list is not None:
if user_id_list is not None:
account_id_list = self.get_account_id_list_from_organization_name(
user_id_list, organization_name_list=organization_name_list
)
for organization_name in organization_name_list:
labor_list.extend(
self.get_labor_list_from_organization_name(
organization_name,
account_id_list=account_id_list,
start_date=start_date,
end_date=end_date,
add_monitored_worktime=add_monitored_worktime,
)
)
else:
raise RuntimeError(f"organization_name_list or project_id_list を指定してください。")
# Narrow down to the users being aggregated
if user_id_list is not None:
return [e for e in labor_list if e.user_id in user_id_list]
else:
return labor_list
@staticmethod
def create_sum_worktime_df(
labor_list: List[LaborWorktime],
user_list: List[User],
start_date: str,
end_date: str,
labor_availability_list_dict: Optional[Dict[str, List[LaborAvailability]]] = None,
):
reform_dict = {
("date", ""): [
e.strftime(ListWorktimeByUserMain.DATE_FORMAT)
for e in pandas.date_range(start=start_date, end=end_date)
],
("dayofweek", ""): [e.strftime("%a") for e in pandas.date_range(start=start_date, end=end_date)],
}
username_list = []
for user in user_list:
sum_worktime_list = ListWorktimeByUserMain.get_sum_worktime_list(
labor_list, user_id=user.user_id, start_date=start_date, end_date=end_date
)
username_list.append(user.username)
reform_dict.update(
{
(user.username, "作業予定"): [e.worktime_plan_hour for e in sum_worktime_list],
(user.username, "作業実績"): [e.worktime_result_hour for e in sum_worktime_list],
}
)
if labor_availability_list_dict is not None:
labor_availability_list = labor_availability_list_dict.get(user.user_id, [])
reform_dict.update(
{
(user.username, "予定稼働"): ListWorktimeByUserMain.get_availability_list(
labor_availability_list, start_date, end_date
)
}
)
key_list = ["作業予定", "作業実績", "予定稼働"] if labor_availability_list_dict else ["作業予定", "作業実績"]
for key in key_list:
data = numpy.array([reform_dict[(username, key)] for username in username_list], dtype=float)
data = numpy.nan_to_num(data)
reform_dict[("合計", key)] = list(numpy.sum(data, axis=0)) # type: ignore
columns = (
[("date", ""), ("dayofweek", "")]
+ [("合計", key) for key in key_list]
+ [(username, key) for username in username_list for key in key_list]
)
sum_worktime_df = | pandas.DataFrame(reform_dict, columns=columns) | pandas.DataFrame |
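# --- Hedged sketch (illustration only, not part of either file) ---------------
# create_sum_worktime_df above fills `reform_dict` with 2-tuple keys such as
# ("date", "") and (username, "作業予定"), so the resulting frame has MultiIndex
# columns that the CSV writers in the class select with (name, metric) pairs.
# A minimal reproduction with invented names:
def _demo_multiindex_columns():
    import pandas
    reform = {
        ("date", ""): ["2021-01-01", "2021-01-02"],
        ("alice", "plan"): [1.0, 2.0],
        ("alice", "actual"): [0.5, 2.5],
    }
    df = pandas.DataFrame(reform)
    df.columns = pandas.MultiIndex.from_tuples(df.columns)
    return df[("alice", "plan")]  # select a single (user, metric) column
# ------------------------------------------------------------------------------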
from customer_tools import Customer
import numpy as np
import pandas as pd
def arrival_time_from_hour_distribution(df, date):
""" This function takes as input the average number of customer
entering the shop per hour and return a randomly sampled
arrival time per minute to simulate arrival time of customers
in the shop for one day"""
#Get min and max hours
hour_min = df.index.min()
hour_max = df.index.max()+1
hours = np.arange(hour_min,hour_max,1)
minutes = np.arange(0,60,1)
list_entrance = []
for h in hours:
#Get the number of people per hour
nb_people = df.loc[df.index == h ,'entrance count'].values[0]
#Randomly distribute these expected arrivals as minutes
minutes_entrance = np.random.choice(minutes,nb_people,replace=True)
minutes_in_h=[]
for m in minutes_entrance :
time = f'{h}:{m}:00'
list_entrance.append(time)
#Create dataframe with customer id and time arrival
day_customer = {'time':list_entrance}
df_time = pd.DataFrame(day_customer)
df_time = pd.to_timedelta(df_time['time']).copy()  # 'H:M:S' strings need no unit argument
df_time = df_time.sort_values(ascending=True)
df_time = df_time.reset_index()
df_time.drop(columns=['index'],inplace=True)
df_time['date'] = date
df_time['date'] = pd.to_datetime(df_time['date'])
df_time['time'] = df_time['date'] + df_time['time']
d = {'time':df_time['time'],'customer_id':df_time.index+1}
df_time = pd.DataFrame(d)
return df_time
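# Hedged usage sketch (not in the original module): the hourly profile below is
# invented, but it has the shape arrival_time_from_hour_distribution expects —
# an integer index of opening hours and an 'entrance count' column.
def _demo_arrival_times():
    hourly = pd.DataFrame({'entrance count': [5, 12, 9]}, index=[7, 8, 9])
    return arrival_time_from_hour_distribution(hourly, date='2019-09-02')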
def simulate_customers(df_entry):
"""
This function takes the hour/minute arrival time and the id number of
customers and will simulate the behaviour in the shop for each customer
and return a dataframe with minute by minute location for each customer
"""
df_simulation = pd.DataFrame()
#Simulate behaviour in the shop for each customer id
for id in df_entry['customer_id']:
df_customer = df_entry.loc[df_entry['customer_id']==id,:].copy()
#Create a new customer class
new_customer = Customer(id)
#Calling all generators to have access to history
all_states = list(new_customer.gen)
#Getting the entry time in good format for date_range
entry_time = df_customer.loc[df_customer['customer_id']==id,'time'].iloc[0]
#Number of minute to add to entry time to have leaving time
minutes_to_add = new_customer.nb_state-1
leaving_time = | pd.to_datetime(df_entry.loc[df_entry['customer_id']==id,'time']) | pandas.to_datetime |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from collections.abc import Iterable
ALLOWED_TIME_COLUMN_TYPES = [
pd.Timestamp,
pd.DatetimeIndex,
datetime.datetime,
datetime.date,
]
def is_datetime_like(x):
"""Function that checks if a data frame column x is of a datetime type."""
return any(isinstance(x, col_type) for col_type in ALLOWED_TIME_COLUMN_TYPES)
def get_datetime_col(df, datetime_colname):
"""
Helper function for extracting the datetime column as datetime type from
a data frame.
Args:
df: pandas DataFrame containing the column to convert
datetime_colname: name of the column to be converted
Returns:
pandas.Series: converted column
Raises:
Exception: if datetime_colname does not exist in the dateframe df.
Exception: if datetime_colname cannot be converted to datetime type.
"""
if datetime_colname in df.index.names:
datetime_col = df.index.get_level_values(datetime_colname)
elif datetime_colname in df.columns:
datetime_col = df[datetime_colname]
else:
raise Exception("Column or index {0} does not exist in the data " "frame".format(datetime_colname))
if not is_datetime_like(datetime_col):
datetime_col = pd.to_datetime(df[datetime_colname])
return datetime_col
def get_month_day_range(date):
"""
Returns the first date and last date of the month of the given date.
"""
# Replace the date in the original timestamp with day 1
first_day = date + relativedelta(day=1)
# Replace the date in the original timestamp with day 1
# Add a month to get to the first day of the next month
# Subtract one day to get the last day of the current month
last_day = date + relativedelta(day=1, months=1, days=-1, hours=23)
return first_day, last_day
def split_train_validation(df, fct_horizon, datetime_colname):
"""
Splits the input dataframe into train and validate folds based on the
forecast creation time (fct) and forecast horizon specified by fct_horizon.
Args:
df: The input data frame to split.
fct_horizon: list of tuples in the format of
(fct, (forecast_horizon_start, forecast_horizon_end))
datetime_colname: name of the datetime column
Note: df[datetime_colname] needs to be a datetime type.
"""
i_round = 0
for fct, horizon in fct_horizon:
i_round += 1
train = df.loc[df[datetime_colname] < fct].copy()
validation = df.loc[(df[datetime_colname] >= horizon[0]) & (df[datetime_colname] <= horizon[1]),].copy()
yield i_round, train, validation
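# Hedged usage sketch (not part of the original module): the frame and the
# (forecast creation time, horizon) pairs below are invented to show the
# expected shape of `fct_horizon`.
def _demo_split_train_validation():
    df = pd.DataFrame(
        {"timestamp": pd.date_range("2020-01-01", periods=10, freq="D"), "y": range(10)}
    )
    fct_horizon = [
        (pd.Timestamp("2020-01-06"), (pd.Timestamp("2020-01-06"), pd.Timestamp("2020-01-08")))
    ]
    for i_round, train, validation in split_train_validation(df, fct_horizon, "timestamp"):
        print(i_round, len(train), len(validation))  # 1 5 3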
def add_datetime(input_datetime, unit, add_count):
"""
Function to add a specified units of time (years, months, weeks, days,
hours, or minutes) to the input datetime.
Args:
input_datetime: datatime to be added to
unit: unit of time; valid values (matching the codes handled below): 'Y', 'M',
'W', 'D', 'h', 'm'.
add_count: number of units to add
Returns:
New datetime after adding the time difference to input datetime.
Raises:
Exception: if an invalid unit is provided. Valid units are:
'Y', 'M', 'W', 'D', 'h', and 'm'.
"""
if unit == "Y":
new_datetime = input_datetime + relativedelta(years=add_count)
elif unit == "M":
new_datetime = input_datetime + relativedelta(months=add_count)
elif unit == "W":
new_datetime = input_datetime + relativedelta(weeks=add_count)
elif unit == "D":
new_datetime = input_datetime + relativedelta(days=add_count)
elif unit == "h":
new_datetime = input_datetime + relativedelta(hours=add_count)
elif unit == "m":
new_datetime = input_datetime + relativedelta(minutes=add_count)
else:
raise Exception(
"Invalid backtest step unit, {}, provided. Valid " "step units are Y, M, W, D, h, " "and m".format(unit)
)
return new_datetime
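# Hedged examples (not in the original file) of the unit codes handled above.
def _demo_add_datetime():
    base = datetime.datetime(2020, 1, 31)
    assert add_datetime(base, "M", 1) == datetime.datetime(2020, 2, 29)  # month-end aware
    assert add_datetime(base, "D", 1) == datetime.datetime(2020, 2, 1)
    assert add_datetime(base, "h", 12) == datetime.datetime(2020, 1, 31, 12)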
def convert_to_tsdf(input_df, time_col_name, time_format):
"""
Convert a time column in a data frame to monotonically increasing time
index.
Args:
input_df(pandas.DataFrame): Input data frame to convert.
time_col_name(str): Name of the time column to use as index.
time_format(str): Format of the time column.
Returns:
pandas.DataFrame: A new data frame with the time column of the input
data frame set as monotonically increasing index.
"""
output_df = input_df.copy()
if not is_datetime_like(output_df[time_col_name]):
output_df[time_col_name] = pd.to_datetime(output_df[time_col_name], format=time_format)
output_df.set_index(time_col_name, inplace=True)
if not output_df.index.is_monotonic_increasing:
output_df.sort_index(inplace=True)
return output_df
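# Hedged usage sketch (not part of the original module): parse a string time
# column and get back a frame indexed by a sorted DatetimeIndex.
def _demo_convert_to_tsdf():
    raw = pd.DataFrame({"ts": ["2020-01-02 00:00", "2020-01-01 00:00"], "load": [2.0, 1.0]})
    tsdf = convert_to_tsdf(raw, time_col_name="ts", time_format="%Y-%m-%d %H:%M")
    return tsdf.index.is_monotonic_increasing  # True after the sort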
def is_iterable_but_not_string(obj):
"""
Determine if an object has iterable, list-like properties.
Importantly, this functions *does not* consider a string
to be list-like, even though Python strings are iterable.
"""
return isinstance(obj, Iterable) and not isinstance(obj, str)
def get_offset_by_frequency(frequency):
frequency_to_offset_map = {
"B": pd.offsets.BDay(),
"C": pd.offsets.CDay(),
"W": pd.offsets.Week(),
"WOM": pd.offsets.WeekOfMonth(),
"LWOM": pd.offsets.LastWeekOfMonth(),
"M": pd.offsets.MonthEnd(),
"MS": pd.offsets.MonthBegin(),
"BM": pd.offsets.BMonthEnd(),
"BMS": pd.offsets.BMonthBegin(),
"CBM": pd.offsets.CBMonthEnd(),
"CBMS": pd.offsets.CBMonthBegin(),
"SM": pd.offsets.SemiMonthEnd(),
"SMS": pd.offsets.SemiMonthBegin(),
"Q": pd.offsets.QuarterEnd(),
"QS": pd.offsets.QuarterBegin(),
"BQ": pd.offsets.BQuarterEnd(),
"BQS": pd.offsets.BQuarterBegin(),
"REQ": pd.offsets.FY5253Quarter(),
"A": pd.offsets.YearEnd(),
"AS": pd.offsets.YearBegin(),
"BYS": pd.offsets.YearBegin(),
"BA": pd.offsets.BYearEnd(),
"BAS": pd.offsets.BYearBegin(),
"RE": pd.offsets.FY5253(),
"BH": pd.offsets.BusinessHour(),
"CBH": pd.offsets.CustomBusinessHour(),
"D": pd.offsets.Day(),
"H": | pd.offsets.Hour() | pandas.offsets.Hour |
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import scipy.spatial.distance as dist
import scipy.cluster.hierarchy as sch
# fn = os.path.join('static/gcorr/opt_corr.tsv.gz')
# fn = os.path.join('static/gcorr/traits.tsv.gz')
def initialize():
# Minimum z-score to display data for.
min_z = 3
# Minimum number of cases for binary phenotypes
min_cases = 250
# Height in px of plot
plot_height = 800
# Width in px of plot
plot_width = 1200
# Maximum number of phenotypes to display
max_phenos = 100
fn = os.path.join('/biobankengine/app/static/gcorr/opt_corr.tsv.gz')
data = pd.read_table(fn, index_col=0)
t = data.copy(deep=True)
t.index = ['_'.join(list(reversed(x.split('_')))) for x in t.index]
for columns in [['p1_code', 'p2_code'], ['tau1', 'tau2'], ['p1', 'p2'],
['p1_num_cases', 'p2_num_cases']]:
a = list(t[columns[0]])
b = list(t[columns[1]])
t.loc[:, columns[1]] = a
t.loc[:, columns[0]] = b
data = pd.concat([data, t])
fn = os.path.join('/biobankengine/app/static/gcorr/traits.tsv.gz')
phenos = pd.read_table(fn, index_col=0)
phenos = phenos.loc[sorted(list(set(data.p1_code) | set(data.p2_code)))]
phenos['phenotype'] = (
phenos['GBE_short_name'] + ' (' +
| pd.Series(phenos.index, index=phenos.index) | pandas.Series |
# python make_gini_table_hesitancy_by_income.py MSA_NAME vaccination_ratio vaccination_time rel_to NUM_GROUPS ACCEPTANCE_SCENARIO
# python make_gini_table_hesitancy_by_income.py Atlanta 0.1 31 Baseline 5 real
import setproctitle
setproctitle.setproctitle("covid-19-vac@chenlin")
import sys
import os
import datetime
import pandas as pd
import numpy as np
import constants
import functions
import pdb
############################################################
# Main variable settings
root = '/data/chenlin/COVID-19/Data'
# Simulation times and random seeds
NUM_SEEDS = 30
MSA_NAME = sys.argv[1]
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_NAME]
print('MSA_NAME:',MSA_NAME)
print('MSA_NAME_FULL:',MSA_NAME_FULL)
# Vaccination_Ratio
VACCINATION_RATIO = float(sys.argv[2])
print('VACCINATION_RATIO: ', VACCINATION_RATIO)
# Vaccination Time
VACCINATION_TIME_STR = sys.argv[3]
print('VACCINATION_TIME:',VACCINATION_TIME_STR)
VACCINATION_TIME = float(VACCINATION_TIME_STR)
# Relative to which variable
REL_TO = sys.argv[4]
print('Relative to: ', REL_TO)
demo_policy_list = ['Age_Flood', 'Income_Flood', 'JUE_EW_Flood']
# Number of groups
NUM_GROUPS = int(sys.argv[5]); print('NUM_GROUPS: ',NUM_GROUPS)
if(REL_TO=='No_Vaccination'):
policy_list = ['No_Vaccination','Baseline', 'Age_Flood', 'Income_Flood', 'JUE_EW_Flood']
elif(REL_TO=='Baseline'):
policy_list = ['Baseline', 'No_Vaccination','Age_Flood', 'Income_Flood', 'JUE_EW_Flood']
else:
print('Invalid REL_TO.')
print('policy list:', policy_list)
# Vaccine acceptance scenario: real, cf1, cf2
ACCEPTANCE_SCENARIO = sys.argv[6]
print('Vaccine acceptance scenario: ', ACCEPTANCE_SCENARIO)
############################################################
# Functions
# Analyze results and produce graphs: All policies
def output_result(cbg_table, demo_feat, policy_list, num_groups, print_result=True, rel_to=REL_TO):
results = {}
for policy in policy_list:
exec("final_deaths_rate_%s_total = cbg_table['Final_Deaths_%s'].sum()/cbg_table['Sum'].sum()" % (policy.lower(),policy))
cbg_table['Final_Deaths_' + policy] = eval('avg_final_deaths_' + policy.lower())
exec("%s = np.zeros(num_groups)" % ('final_deaths_rate_'+ policy.lower()))
deaths_total_abs = eval('final_deaths_rate_%s_total'%(policy.lower()))
deaths_total_abs = np.round(deaths_total_abs,6)
for i in range(num_groups):
eval('final_deaths_rate_'+ policy.lower())[i] = cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Final_Deaths_' + policy].sum()
eval('final_deaths_rate_'+ policy.lower())[i] /= cbg_table[cbg_table[demo_feat + '_Quantile']==i]['Sum'].sum()
deaths_gini_abs = functions.gini(eval('final_deaths_rate_'+ policy.lower()))
deaths_gini_abs = np.round(deaths_gini_abs,6)
if(rel_to=='No_Vaccination'): # compared to No_Vaccination
if(policy=='No_Vaccination'):
deaths_total_no_vaccination = deaths_total_abs
deaths_gini_no_vaccination = deaths_gini_abs
deaths_total_rel = 0
deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs, #.6f
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.6f'% deaths_gini_abs,
'deaths_gini_rel':'%.6f'% deaths_gini_rel}
else:
deaths_total_rel = (np.round(eval('final_deaths_rate_%s_total'%(policy.lower())),6) - deaths_total_no_vaccination) / deaths_total_no_vaccination
deaths_gini_rel = (np.round(functions.gini(eval('final_deaths_rate_'+ policy.lower())),6) - deaths_gini_no_vaccination) / deaths_gini_no_vaccination
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.6f'% deaths_gini_abs,
'deaths_gini_rel':'%.6f'% deaths_gini_rel}
elif(rel_to=='Baseline'): # compared to Baseline
if(policy=='Baseline'):
deaths_total_baseline = deaths_total_abs
deaths_gini_baseline = deaths_gini_abs
deaths_total_rel = 0
deaths_gini_rel = 0
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.6f'% deaths_gini_abs,
'deaths_gini_rel':'%.6f'% deaths_gini_rel}
else:
deaths_total_rel = (np.round(eval('final_deaths_rate_%s_total'%(policy.lower())),6) - deaths_total_baseline) / deaths_total_baseline
deaths_gini_rel = (np.round(functions.gini(eval('final_deaths_rate_'+ policy.lower())),6) - deaths_gini_baseline) / deaths_gini_baseline
results[policy] = {
'deaths_total_abs':'%.6f'% deaths_total_abs,
'deaths_total_rel':'%.6f'% deaths_total_rel,
'deaths_gini_abs':'%.6f'% deaths_gini_abs,
'deaths_gini_rel':'%.6f'% deaths_gini_rel}
return results
def make_gini_table(policy_list, demo_feat_list, parallel, num_groups, save_result=False, save_path=None):
cbg_table_name_dict=dict()
cbg_table_name_dict['Age'] = cbg_age_msa
cbg_table_name_dict['Mean_Household_Income'] = cbg_income_msa
cbg_table_name_dict['Essential_Worker'] = cbg_occupation_msa
print('Policy list: ', policy_list)
print('Demographic feature list: ', demo_feat_list)
gini_df = pd.DataFrame(columns=pd.MultiIndex.from_tuples([('All','deaths_total_abs'),('All','deaths_total_rel')]))
if(parallel==True):
display_list = [policy_list[0]]
for i in range(target_num):
display_list.append('Target'+str(i)) # 20210627
gini_df['Policy'] = display_list
else:
gini_df['Policy'] = policy_list
for demo_feat in demo_feat_list:
results = output_result(cbg_table_name_dict[demo_feat],
demo_feat, policy_list, num_groups=NUM_GROUPS,
print_result=False, rel_to=REL_TO)
for i in range(len(policy_list)):
policy = policy_list[i]
gini_df.loc[i,('All','deaths_total_abs')] = results[policy]['deaths_total_abs']
gini_df.loc[i,('All','deaths_total_rel')] = results[policy]['deaths_total_rel']
gini_df.loc[i,(demo_feat,'deaths_gini_abs')] = results[policy]['deaths_gini_abs']
gini_df.loc[i,(demo_feat,'deaths_gini_rel')] = results[policy]['deaths_gini_rel']
gini_df.set_index(['Policy'],inplace=True)
# Transpose
gini_df_trans = pd.DataFrame(gini_df.values.T, index=gini_df.columns, columns=gini_df.index)
# Save .csv
if(save_result==True):
gini_df_trans.to_csv(save_path)
return gini_df_trans
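# Hedged sketch (not from this repository): `functions.gini` is defined elsewhere
# in the project. A standard Gini coefficient over a 1-D array of non-negative
# per-group values, which is what output_result passes in, looks roughly like:
def _gini_sketch(values):
    x = np.sort(np.asarray(values, dtype=float))
    n = len(x)
    cum = np.cumsum(x)
    # equivalent to (2 * sum(i * x_i)) / (n * sum(x)) - (n + 1) / n over sorted x
    return (n + 1 - 2 * np.sum(cum) / cum[-1]) / n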
############################################################
# Load Data
# Load ACS Data for matching with NYT Data
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
# Extract data specific to one msa, according to ACS data
# MSA list
msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
print('\nMatching MSA_NAME_FULL to MSAs in ACS Data: ',msa_match)
msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
print('Number of counties matched: ',len(msa_data))
msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
good_list = list(msa_data['FIPS Code'].values);#print('Indices of counties matched: ',good_list)
# Load CBG ids belonging to a specific metro area
cbg_ids_msa = pd.read_csv(os.path.join(root,MSA_NAME,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
M = len(cbg_ids_msa);#print('Number of CBGs in this metro area:', M)
# Mapping from cbg_ids to columns in hourly visiting matrices
cbgs_to_idxs = dict(zip(cbg_ids_msa['census_block_group'].values, range(M)))
x = {}
for i in cbgs_to_idxs:
x[str(i)] = cbgs_to_idxs[i]
# Select counties belonging to the MSA
y = []
for i in x:
if((len(i)==12) & (int(i[0:5])in good_list)):
y.append(x[i])
if((len(i)==11) & (int(i[0:4])in good_list)):
y.append(x[i])
idxs_msa_all = list(x.values());#print('Number of CBGs in this metro area:', len(idxs_msa_all))
idxs_msa_nyt = y; #print('Number of CBGs in to compare with NYT data:', len(idxs_msa_nyt))
# Load ACS Data for MSA-county matching
acs_data = pd.read_csv(os.path.join(root,'list1.csv'),header=2)
acs_msas = [msa for msa in acs_data['CBSA Title'].unique() if type(msa) == str]
msa_match = functions.match_msa_name_to_msas_in_acs_data(MSA_NAME_FULL, acs_msas)
msa_data = acs_data[acs_data['CBSA Title'] == msa_match].copy()
msa_data['FIPS Code'] = msa_data.apply(lambda x : functions.get_fips_codes_from_state_and_county_fp((x['FIPS State Code']),x['FIPS County Code']), axis=1)
good_list = list(msa_data['FIPS Code'].values)
#print('Counties included: ', good_list)
del acs_data
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = pd.read_csv(filepath)
# Extract CBGs belonging to the MSA - https://covid-mobility.stanford.edu//datasets/
cbg_age_msa = | pd.merge(cbg_ids_msa, cbg_agesex, on='census_block_group', how='left') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 17:07:00 2020
@author: hexx
This code does the following:
(1) saves policy, COVID, and projection data downloaded online to a local folder
(2) processes and saves the data used to project mobility
"""
import pandas as pd
import numpy as np
import os
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
from myFunctions import def_add_datashift, createFolder
import warnings
warnings.filterwarnings("ignore")
createFolder('./Mobility projection')
scenario_cases = ['lower', 'mean', 'upper'] #'upper', 'lower',
startDate = '2020-02-24'
today_x = pd.to_datetime('today')
today =today_x.strftime("%Y-%m-%d")
# today ='2020-07-08'
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()
YYG_Date = PODA_Model['YYG_File_Date']
moving_avg = PODA_Model['Moving_Average']
#create folder to save YYG Projection
createFolder('./YYG Data/'+YYG_Date)
# createFolder('./COVID/'+today)
df_StateName_Code = PODA_Model['StateName_StateCode']
ML_Data = PODA_Model['ML_Data']
# load Policy Data
df_Policy = pd.read_csv('https://raw.githubusercontent.com/COVID19StatePolicy/SocialDistancing/master/data/USstatesCov19distancingpolicy.csv', encoding= 'unicode_escape')
createFolder('./Policy File')
df_Policy.to_excel('./Policy File/Policy'+today+'.xlsx') # save policy data
# Read Population Data
df_Population = PODA_Model['State Population']
#Read County Area
df_Area = PODA_Model['State Area']
#Employment
df_Employee = PODA_Model['State Employment']
confirmed = ML_Data[ML_Data['State Name']=='Michigan']
confirmed = confirmed[['US Total Confirmed', 'US Daily Confirmed', 'US Daily Death']]
confirmed = confirmed.rename(columns={"US Total Confirmed":"ML US Total Confirmed", "US Daily Confirmed":"ML US Daily Confirmed",
"US Daily Death":"ML US Daily Death"})
infected_to_Confirmed = pd.DataFrame(columns = ['Country Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
infected_to_Confirmed_State = pd.DataFrame(columns = ['State Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
for zz, scenario in enumerate(scenario_cases):
'''
Calculate the new infected to confirmed correlation
'''
df_US_Projection = pd.read_csv('https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+YYG_Date+'/US.csv')
df_US_Projection.to_csv('./YYG Data/'+YYG_Date+'/US.csv') # save US Projection data
df_US_Projection['date'] = pd.to_datetime(df_US_Projection['date'])
df_US_Projection.set_index('date', inplace=True)
YYG_Daily_Infected = df_US_Projection[['predicted_new_infected_'+scenario]]
YYG_Daily_Infected = YYG_Daily_Infected[(YYG_Daily_Infected.index < today_x) & (YYG_Daily_Infected.index > pd.to_datetime('2020-05-01'))]
R2_old=0
for j in range(0, 20):
YYG_Data_shifted = YYG_Daily_Infected['predicted_new_infected_'+scenario].shift(j).to_frame()
YYG_Data_shifted['date']=YYG_Data_shifted.index
YYG_Data_shifted=YYG_Data_shifted.set_index('date')
# merged = pd.merge_asof(YYG_Data_shifted, confirmed, left_index=True, right_index=True).dropna()
merged = confirmed.join(YYG_Data_shifted).dropna()
x_conv=merged['predicted_new_infected_'+scenario].to_numpy()
y_conv = merged['ML US Daily Confirmed'].to_numpy()
x_length = len(x_conv)
x_conv = x_conv.reshape(x_length, 1)
y_conv = y_conv.reshape(x_length, 1)
regr = linear_model.LinearRegression(fit_intercept = False)
regr.fit(x_conv, y_conv)
R2_new = regr.score(x_conv, y_conv)
if R2_new > R2_old:
new_row = {'Country Name': 'US', 'scenario': scenario, 'shiftDay': j,
'regr_coef': regr.coef_[0][0], 'regr_interp':regr.intercept_, 'R2': R2_new}
merged_select = merged
regr_selected = regr
R2_old = R2_new
infected_to_Confirmed=infected_to_Confirmed.append(new_row, ignore_index =True)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
# normalized scale
ax.plot(merged_select.index, merged_select['predicted_new_infected_'+scenario]*new_row['regr_coef'] + new_row['regr_interp'], 'o', label='YYG Predicted')
# ax.plot(merged_select.index, merged_select['predicted_total_infected_mean'], 'o', label='YYG Predicted')
ax.plot(merged_select.index, merged_select['ML US Daily Confirmed'], label='confirmed')
ax.set_xlabel('Label')
ax.set_ylabel('Prediction')
ax.set_xlim(pd.to_datetime('2020-05-01'), | pd.to_datetime('today') | pandas.to_datetime |
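# --- Hedged sketch (illustration only, not part of either file) ---------------
# The shift-scan above looks for the lag (in days) at which YYG predicted
# infections best explain reported confirmed cases, using a no-intercept linear
# fit and keeping the shift with the highest R^2. Compactly, with invented
# argument names:
def _demo_best_shift(predicted, confirmed, max_shift=20):
    """predicted, confirmed: pandas Series indexed by date (assumed inputs)."""
    import pandas as pd
    from sklearn.linear_model import LinearRegression
    best = None
    for shift in range(max_shift):
        merged = pd.concat({"x": predicted.shift(shift), "y": confirmed}, axis=1).dropna()
        X, y = merged[["x"]].values, merged["y"].values
        reg = LinearRegression(fit_intercept=False).fit(X, y)
        r2 = reg.score(X, y)
        if best is None or r2 > best[0]:
            best = (r2, shift, reg.coef_[0])
    return best  # (R^2, shift in days, scale factor)
# ------------------------------------------------------------------------------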
# +
import numpy as np
import pandas as pd
from .plot_spatial import plot_spatial_general as plot_spatial
def interpolate_coord(start=10, end=5, steps=100, accel_power=3, accelerate=True, jitter=None):
r"""
Interpolate coordinates between `start` and `end` positions in N steps
with non-linearity in movement according to accel_power,
and accelerate change in coordinates (True) or slow it down (False).
:param jitter: shift positions by a random number by sampling:
    new_coord = np.random.normal(loc=coord, scale=jitter), reasonable values 0.01-0.1
"""
seq = np.linspace(np.zeros_like(start), np.ones_like(end), steps)
seq = seq ** accel_power
if jitter is not None:
seq = np.random.normal(loc=seq, scale=jitter * np.abs(seq))
seq[0] = np.zeros_like(start)
seq[steps - 1] = np.ones_like(end)
if accelerate:
seq = 1 - seq
seq = seq * (start - end) + end
if not accelerate:
seq = np.flip(seq, axis=0)
return seq
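# Hedged usage sketch (not in the original module): interpolate two 2-D points
# over five frames; the output stacks one coordinate array per frame.
def _demo_interpolate_coord():
    start = np.array([[0.0, 0.0], [10.0, 10.0]])
    end = np.array([[5.0, 5.0], [0.0, 0.0]])
    frames = interpolate_coord(start, end, steps=5, accel_power=3, accelerate=True)
    return frames.shape  # (5, 2, 2)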
def expand_1by1(df):
col6 = [df.copy() for i in range(df.shape[1])]
index = df.index.astype(str)
columns = df.columns
for i in range(len(col6)):
col6_1 = col6[i]
col6_1_new = np.zeros_like(col6_1)
col6_1_new[:, i] = col6_1[col6_1.columns[i]].values
col6_1_new = pd.DataFrame(col6_1_new, index=index + str(i), columns=columns)
col6[i] = col6_1_new
return pd.concat(col6, axis=0)
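# Hedged illustration (not in the original module): expand_1by1 turns an n x k
# one-hot-style frame into an (n*k) x k frame in which block i keeps only
# column i, so each cluster can later be drawn as its own layer.
def _demo_expand_1by1():
    df = pd.DataFrame({"a": [1, 0], "b": [0, 1]}, index=["c1", "c2"])
    return expand_1by1(df).shape  # (4, 2)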
def plot_video_mapping(
adata_vis,
adata,
sample_ids,
spot_factors_df,
sel_clust,
sel_clust_col,
sample_id,
sc_img=None,
sp_img=None,
sp_img_scaling_fac=1,
adata_cluster_col="annotation_1",
cell_fact_df=None,
step_n=[20, 100, 15, 45, 80, 30],
step_quantile=[1, 1, 1, 1, 0.95, 0.95],
sc_point_size=1,
aver_point_size=20,
sp_point_size=5,
reorder_cmap=range(7),
label_clusters=False,
style="dark_background",
adjust_text=False,
sc_alpha=0.6,
sp_alpha=0.8,
img_alpha=0.8,
sc_power=20,
sp_power=20,
sc_accel_power=3,
sp_accel_power=3,
sc_accel_decel=True,
sp_accel_decel=False,
sc_jitter=None,
sp_jitter=None,
save_path="./results/mouse_viseum_snrna/std_model/mapping_video/",
crop_x=None,
crop_y=None,
save_extension="png",
colorbar_shape={"vertical_gaps": 2, "horizontal_gaps": 0.13},
):
r"""
Create frames for a video illustrating the approach from UMAP of single cells to their spatial locations.
We use linear interpolation of UMAP and spot coordinates to create movement.
:param adata_vis: anndata with Visium data (including spatial slot in `.obsm`)
:param adata: anndata with single cell data (including X_umap slot in `.obsm`)
:param sample_ids: pd.Series - sample ID for each spot
:param spot_factors_df: output of the model showing spatial expression of cell types / factors.
:param sel_clust: selected cluster names in `adata_cluster_col` column of adata.obs
:param sel_clust_col: selected cluster column name in spot_factors_df
:param sample_id: sample id to use for visualisation
:param adata_cluster_col: column in adata.obs containing cluster annotations
:param cell_fact_df: alternative to adata_cluster_col, pd.DataFrame specifying class for each cell (can be continuous).
:param step_n: how many frames to record in each step: UMAP, UMAP collapsing into averages, averages, averages expanding into locations, locations.
:param step_quantile: how to choose maximum colorscale limit in each step? (quantile) Use 1 for discrete values.
:param sc_point_size: point size for cells
:param aver_point_size: point size for averages
:param sp_point_size: point size for spots
:param fontsize: size of text label of averages
:param adjust_text: adjust text label position to avoid overlaps
:param sc_alpha, sp_alpha: color alpha scaling for single cells and spatial.
:param sc_power, sp_power: change dot size nonlinearly with this exponent
:param sc_accel_power, sp_accel_power: change movement speed size nonlinearly with this exponent
:param sc_accel_decel, sp_accel_decel: accelerate (True) or decelereate (False)
:param save_path: path where to save frames (named according to order of steps)
"""
from tqdm.auto import tqdm
# extract spot expression and coordinates
coords = adata_vis.obsm["spatial"].copy() * sp_img_scaling_fac
s_ind = sample_ids.isin([sample_id])
sel_clust_df = spot_factors_df.loc[s_ind, sel_clust_col]
sel_coords = coords[s_ind, :]
sample_id = sample_ids[s_ind]
if sc_img is None:
# create a black background image
xy = sel_coords.max(0) + sel_coords.max(0) * 0.05
sc_img = np.zeros((int(xy[1]), int(xy[0]), 3))
if sp_img is None:
# create a black background image
xy = sel_coords.max(0) + sel_coords.max(0) * 0.05
sp_img = np.zeros((int(xy[1]), int(xy[0]), 3))
img_alpha = 1
img_alpha_seq = 1
else:
img_alpha_seq = interpolate_coord(
start=0, end=img_alpha, steps=step_n[3] + 1, accel_power=sc_power, accelerate=True, jitter=None
)
# extract umap coordinates
umap_coord = adata.obsm["X_umap"].copy()
# make positive and rescale to fill the image
umap_coord[:, 0] = umap_coord[:, 0] + abs(umap_coord[:, 0].min()) + abs(umap_coord[:, 0].max()) * 0.01
umap_coord[:, 1] = -umap_coord[:, 1] # flip y axis
umap_coord[:, 1] = umap_coord[:, 1] + abs(umap_coord[:, 1].min()) + abs(umap_coord[:, 1].max()) * 0.01
if crop_x is None:
img_width = sc_img.shape[0] * 0.99
x_offset = 0
umap_coord[:, 0] = umap_coord[:, 0] / umap_coord[:, 0].max() * img_width
else:
img_width = abs(crop_x[0] - crop_x[1]) * 0.99
x_offset = np.array(crop_x).min()
umap_coord[:, 0] = umap_coord[:, 0] / umap_coord[:, 0].max() * img_width
umap_coord[:, 0] = umap_coord[:, 0] + x_offset
if crop_y is None:
img_height = sc_img.shape[1] * 0.99
y_offset = 0
# y_offset2 = 0
umap_coord[:, 1] = umap_coord[:, 1] / umap_coord[:, 1].max() * img_height
else:
img_height = abs(crop_y[0] - crop_y[1]) * 0.99
y_offset = np.array(crop_y).min()
# y_offset2 = sp_img.shape[1] - np.array(crop_y).max()
umap_coord[:, 1] = umap_coord[:, 1] / umap_coord[:, 1].max() * img_height
umap_coord[:, 1] = umap_coord[:, 1] + y_offset
if cell_fact_df is None:
cell_fact_df = pd.get_dummies(adata.obs[adata_cluster_col], columns=[adata_cluster_col])
cell_fact_df = cell_fact_df[sel_clust]
cell_fact_df.columns = cell_fact_df.columns.tolist()
cell_fact_df["other"] = (cell_fact_df.sum(1) == 0).astype(np.int64)
# compute average position weighted by cell density
aver_coord = pd.DataFrame()
for c in cell_fact_df.columns:
dens = cell_fact_df[c].values
dens = dens / dens.sum(0)
aver = np.array((umap_coord * dens.reshape((cell_fact_df.shape[0], 1))).sum(0))
aver_coord_1 = pd.DataFrame(aver.reshape((1, 2)), index=[c], columns=["x", "y"])
aver_coord_1["column"] = c
aver_coord = pd.concat([aver_coord, aver_coord_1])
aver_coord = aver_coord.loc[aver_coord.index != "other"]
# compute movement of cells toward averages (increasing size)
moving_averages1 = [
interpolate_coord(
start=umap_coord,
end=np.ones_like(umap_coord) * aver_coord.loc[i, ["x", "y"]].values,
steps=step_n[1] + 1,
accel_power=sc_accel_power,
accelerate=sc_accel_decel,
jitter=sc_jitter,
)
for i in aver_coord.index
]
moving_averages1 = np.array(moving_averages1)
# (increasing dot size) for cells -> averages
circ_diam1 = interpolate_coord(
start=sc_point_size,
end=aver_point_size,
steps=step_n[1] + 1,
accel_power=sc_power,
accelerate=sc_accel_decel,
jitter=None,
)
# compute movement of spots from averages to locations
moving_averages2 = [
interpolate_coord(
start=np.ones_like(sel_coords) * aver_coord.loc[i, ["x", "y"]].values,
end=sel_coords,
steps=step_n[4] + 1,
accel_power=sp_accel_power,
accelerate=sp_accel_decel,
jitter=sp_jitter,
)
for i in aver_coord.index
]
moving_averages2 = np.array(moving_averages2)
# (decreasing dot size) for averages -> locations
circ_diam2 = interpolate_coord(
start=aver_point_size,
end=sp_point_size,
steps=step_n[4] + 1,
accel_power=sp_power,
accelerate=sp_accel_decel,
jitter=None,
)
#### start saving plots ####
# plot UMAP with no changes
for i0 in tqdm(range(step_n[0])):
fig = plot_spatial(
cell_fact_df,
coords=umap_coord,
labels=cell_fact_df.columns,
circle_diameter=sc_point_size,
alpha_scaling=sc_alpha,
img=sc_img,
img_alpha=1,
style=style,
# determine max color level using data quantiles
max_color_quantile=step_quantile[0], # set to 1 to pick max - essential for discrete scaling
crop_x=crop_x,
crop_y=crop_y,
colorbar_position="right",
colorbar_shape=colorbar_shape,
reorder_cmap=reorder_cmap,
)
fig.savefig(f"{save_path}cell_maps_{i0 + 1}.{save_extension}", bbox_inches="tight")
fig.clear()
# plot evolving UMAP from cells to averages
for i1 in tqdm(range(step_n[1])):
ann_no_other = cell_fact_df[cell_fact_df.columns[cell_fact_df.columns != "other"]]
ann_no_other = expand_1by1(ann_no_other)
coord = np.concatenate(moving_averages1[:, i1, :, :], axis=0)
fig = plot_spatial(
ann_no_other,
coords=coord,
labels=ann_no_other.columns,
circle_diameter=circ_diam1[i1],
alpha_scaling=sc_alpha,
img=sc_img,
img_alpha=1,
style=style,
# determine max color level using data quantiles
max_color_quantile=step_quantile[1], # set to 1 to pick max - essential for discrete scaling
crop_x=crop_x,
crop_y=crop_y,
colorbar_position="right",
colorbar_shape=colorbar_shape,
reorder_cmap=reorder_cmap,
)
fig.savefig(f"{save_path}cell_maps_{i0 + i1 + 2}.{save_extension}", bbox_inches="tight")
fig.clear()
# plot averages
if label_clusters:
label_clusters = aver_coord[["x", "y", "column"]]
else:
label_clusters = None
for i2 in tqdm(range(step_n[2])):
ann_no_other = cell_fact_df[cell_fact_df.columns[cell_fact_df.columns != "other"]]
ann_no_other = expand_1by1(ann_no_other)
coord = np.concatenate(moving_averages1[:, i1 + 1, :, :], axis=0)
fig = plot_spatial(
ann_no_other,
coords=coord,
labels=ann_no_other.columns,
text=label_clusters,
circle_diameter=circ_diam1[i1 + 1],
alpha_scaling=sc_alpha,
img=sc_img,
img_alpha=1,
style=style,
# determine max color level using data quantiles
max_color_quantile=step_quantile[2], # set to 1 to pick max - essential for discrete scaling
crop_x=crop_x,
crop_y=crop_y,
colorbar_position="right",
colorbar_shape=colorbar_shape,
reorder_cmap=reorder_cmap,
)
fig.savefig(f"{save_path}cell_maps_{i0 + i1 + i2 + 3}.{save_extension}", bbox_inches="tight")
fig.clear()
# plot averages & fade-in histology image
for i22 in tqdm(range(step_n[3])):
ann_no_other = cell_fact_df[cell_fact_df.columns[cell_fact_df.columns != "other"]]
ann_no_other = expand_1by1(ann_no_other)
coord = np.concatenate(moving_averages1[:, i1 + 1, :, :], axis=0)
fig = plot_spatial(
ann_no_other,
coords=coord,
labels=ann_no_other.columns,
text=label_clusters,
circle_diameter=circ_diam1[i1 + 1],
alpha_scaling=sc_alpha,
img=sp_img,
img_alpha=img_alpha_seq[i22],
style=style,
# determine max color level using data quantiles
max_color_quantile=step_quantile[3], # set to 1 to pick max - essential for discrete scaling
adjust_text=adjust_text,
crop_x=crop_x,
crop_y=crop_y,
colorbar_position="right",
colorbar_shape=colorbar_shape,
reorder_cmap=reorder_cmap,
)
fig.savefig(f"{save_path}cell_maps_{i0 + i1 + i2 + i22 + 4}.{save_extension}", bbox_inches="tight")
fig.clear()
# plot evolving from averages to spatial locations
for i3 in tqdm(range(step_n[4])):
# sel_clust_df_1 = expand_1by1(sel_clust_df)
dfs = []
clusters = []
for i in range(sel_clust_df.shape[1]):
idx = sel_clust_df.values.argmax(axis=1) == i
dfs.append(moving_averages2[i, i3, idx, :])
clusters.append(sel_clust_df[idx])
# coord = moving_averages2[0, i3, :, :]
coord = np.concatenate(dfs, axis=0)
fig = plot_spatial(
| pd.concat(clusters, axis=0) | pandas.concat |
import operator
from shutil import get_terminal_size
from typing import Dict, Hashable, List, Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable
from pandas._typing import ArrayLike, Dtype, Ordered, Scalar
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
doc,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
maybe_cast_to_extension_array,
maybe_infer_to_datetimelike,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import _get_data_algo, factorize, take, take_1d, unique1d
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing
from pandas.core.missing import interpolate_2d
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.sorting import nargsort
from pandas.io.formats import console
def _cat_compare_op(op):
opname = f"__{op.__name__}__"
@unpack_zerodim_and_defer(opname)
def func(self, other):
if is_list_like(other) and len(other) != len(self):
# TODO: Could this fail if the categories are listlike objects?
raise ValueError("Lengths must match.")
if not self.ordered:
if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
raise TypeError(
"Unordered Categoricals can only compare equality or not"
)
if isinstance(other, Categorical):
# Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = "Categoricals can only be compared if 'categories' are the same."
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif self.ordered and not (self.categories == other.categories).all():
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError(
"Categoricals can only be compared if 'ordered' is the same"
)
if not self.ordered and not self.categories.equals(other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
f = getattr(self._codes, opname)
ret = f(other_codes)
mask = (self._codes == -1) | (other_codes == -1)
if mask.any():
            # In other Series, this leads to False, so do that here too
if opname == "__ne__":
ret[(self._codes == -1) & (other_codes == -1)] = True
else:
ret[mask] = False
return ret
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
ret = getattr(self._codes, opname)(i)
if opname not in {"__eq__", "__ge__", "__gt__"}:
# check for NaN needed if we are not equal or larger
mask = self._codes == -1
ret[mask] = False
return ret
else:
if opname == "__eq__":
return np.zeros(len(self), dtype=bool)
elif opname == "__ne__":
return np.ones(len(self), dtype=bool)
else:
raise TypeError(
f"Cannot compare a Categorical for op {opname} with a "
"scalar, which is not a category."
)
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if opname in ["__eq__", "__ne__"]:
return getattr(np.array(self), opname)(np.array(other))
raise TypeError(
f"Cannot compare a Categorical for op {opname} with "
f"type {type(other)}.\nIf you want to compare values, "
"use 'np.asarray(cat) <op> other'."
)
func.__name__ = opname
return func
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except (KeyError, TypeError):
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(["tolist"])
_typ = "categorical"
def __init__(
self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
dtype = CategoricalDtype._from_values_or_dtype(
values, categories, ordered, dtype
)
# At this point, dtype is always a CategoricalDtype, but
# we may have dtype.categories be None, and we need to
# infer categories in a factorization step further below
if fastpath:
self._codes = coerce_indexer_dtype(values, dtype.categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
# By convention, empty lists result in object dtype:
sanitize_dtype = np.dtype("O") if len(values) == 0 else None
null_mask = | isna(values) | pandas.core.dtypes.missing.isna |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Loading the data
data= | pd.read_csv(path) | pandas.read_csv |
"""Handles logic relating to nearest neighbours, i.e. which frames are similar to
each other.
"""
import functools
import math
import os
import time
import warnings
from copy import deepcopy
from itertools import compress, repeat
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm.auto import tqdm
from .frame_extractor import FrameExtractor
from .image_preprocessing import is_monochrome
class Neighbours:
"""Neighbours class, containing values and logic for closest neighbours for frames
in the VRD.
Raises:
Exception: [description]
Returns:
[type]: [description]
Yields:
[type]: [description]
"""
frames: FrameExtractor
distance_list: list
def __init__(self, frames: FrameExtractor, distance_list):
self.frames = frames
self.distance_list = distance_list
self._fix_neighbours()
def copy(self):
new_neighbours = Neighbours(self.frames.copy(), self.distance_list.copy())
return new_neighbours
def filter_same_video(self):
"""Filters any neighbors that belongs to the same video as the original frame.
Raises:
Exception: In case the image list does not match the distance list
"""
all_images = self.frames.all_images
# print(f'Number of frames: {len(all_images)}')
# print(f'Length of distance list: {len(self.distance_list)}')
if len(all_images) != len(self.distance_list):
raise Exception(
"Expected distance list length to match total number of images! Please run this filter first."
)
video_list = list(self.frames.cached_video_index.keys())
# print(video_list)
total_cleaned = 0
for vid in video_list:
vid_indexes = list(self.frames.get_index_from_video_name(vid))
if len(vid_indexes) == 0:
print(
f"Unable to find Indexes matching video {vid}. Skipping for now..."
)
continue
vid_index_min = vid_indexes[0]
vid_index_max = vid_indexes[-1]
for vid_index in vid_indexes:
d, i = self.distance_list[vid_index]
iarr = np.array(i)
if len(iarr) == 0:
continue
in_video = (iarr >= vid_index_min) & (iarr <= vid_index_max)
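                # True marks neighbours whose index lies inside this video's own
                # frame-index range; they are dropped below (except the reference at 0).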
in_video[0] = False # Keep index 0!
num_removed = np.sum(in_video)
if num_removed > 0:
total_cleaned += 1
# TODO: Make this quicker?
d = [x for x, r in zip(d, in_video) if not r]
i = [x for x, r in zip(i, in_video) if not r]
self.distance_list[vid_index] = (d, i)
def _fix_neighbours(self):
"""Ensures that if index A is a neighbour to index B, the reverse is also true!"""
distance_copy = deepcopy(self.distance_list)
# lookup_table = {_i[0]:(i, set(_i[1:]), {__i:__d for __d, __i in zip(_d[1:],_i[1:])}) for i, (_d,_i) in enumerate(distance_copy)}
lookup_table = {
_i[0]: (i, set(_i[1:])) for i, (_d, _i) in enumerate(distance_copy)
}
added_count = 0
for d, i in tqdm(distance_copy):
# Get the original frame for this distance
_, orig_i = d[0], i[0]
# d_to_add = list()
# i_to_add = list()
for _d, _i in zip(d[1:], i[1:]):
try:
neighbour_list_index, matching_set = lookup_table[_i]
except KeyError:
# This happens if the neighbour frame has been deleted - due to lack of neighbours, or just filtered. Should we create it in this case?
# If it was filtered, this goes against the "wishes of the operator", so safest to skip for now.
# Best case, just run this just after creation; Doing so sidesteps this problem
continue
# Check if the original frame is a neighbour in the neighbours' neighbour list
if orig_i not in matching_set:
added_count += 1
# If not there, it should be added (including distance!)
# print(f'Len before: {len(self.neighbours.distance_list[neighbour_list_index][0])}')
new_d = np.append(
self.distance_list[neighbour_list_index][0], _d
) # add original frame distance to neighbour as distance
new_i = np.append(
self.distance_list[neighbour_list_index][1], orig_i
) # i
self.distance_list[neighbour_list_index] = (new_d, new_i)
self.distance_list = sorted(self.distance_list, key=lambda x: x[1][0])
# print(f"Added {added_count} to distance list.")
def _get_subdir(self, index):
frame = self.frames.all_images[index]
return self.frames.get_subdir_for_frame(frame)
def filter_neighbours_not_in_subfolder(self, folders, keep_reverse_matches=False):
"""Filters any neighbours that are not related to the specified subfolder.
The keep_reverse_matches argument changes how this works:
If set to False:
Only frames in the specified subfolders are retained, all others are removed
If set to True:
The same as above, but additionally frames outside the subfolders,
but with neighbours in one of the specified subfolders are retained
Args:
folders ([type]): A list of folders
keep_reverse_matches (bool, optional): Whether or not to keep frames outside the subfolders but with neighbours in the subfolders.
"""
dlist = self.distance_list
if isinstance(folders, list):
folders = set(folders)
else:
folders = set([folders])
new_dlist = []
for d, i in dlist:
if keep_reverse_matches:
if self._get_subdir(i[0]) in folders:
new_dlist.append((d, i))
else:
new_d, new_i = [d[0]], [i[0]]
for dd, ii in zip(d[1:], i[1:]):
subdir = self._get_subdir(ii)
if subdir in folders:
new_d.append(dd)
new_i.append(ii)
if len(new_d) > 1:
new_dlist.append((new_d, new_i))
elif self._get_subdir(i[0]) in folders:
new_dlist.append((d, i))
self.distance_list = new_dlist
def filter_few_neighbours(self, min_neighbours: int):
"""Filter distance lists with < the specified amount of neighbours"""
new_dlist = []
count = 0
for d, i in self.distance_list:
if len(i) < (min_neighbours + 1):
count += 1
continue
new_dlist.append((d, i))
self.distance_list = new_dlist
# print(f'Removed {count} frames with less than {min_neighbours} neighbours.')
def get_best_match(self, video_1, video_2):
"""
Gets the best video match between two videos, given the FrameExtractor, the associated distance_list,
and the two video names.
"""
v1_indexes = self.frames.get_index_from_video_name(video_1)
v2_indexes = self.frames.get_index_from_video_name(video_2)
v1_dlist = [x for x in self.distance_list if x[1][0] in v1_indexes]
# print(Video_1)
# print(Video_2)
new_id_list = []
for d, i in v1_dlist:
is_v2_index = list(map(lambda x: x in v2_indexes, i))
if np.sum(is_v2_index) == 0: # Skip if there are no matches
continue
is_v2_index[0] = True # Keep reference index!
new_id_list.append(
(list(compress(d, is_v2_index)), list(compress(i, is_v2_index)))
)
# print(f'Final list of matches: {len(new_id_list)}')
# Now new_id_list is only matches with video 2! Sort by shortest distance!
new_id_list = sorted(new_id_list, key=lambda x: x[0][1])
if len(new_id_list) == 0:
return None
# first (and therefore best) index after sorting!
relevant_index = new_id_list[0][1]
return (
self.frames.all_images[relevant_index[0]],
self.frames.all_images[relevant_index[1]],
)
def _get_remaining_count(self):
return np.array([(len(x) - 1) for x, y in self.distance_list])
def plot_remaining_statistics(self, figure_size=(15, 6), **kwargs):
"""Displays histograms for 1) remaining neighbours and 2) remaining distances.
Using these values, the user can detect if a filtering step removed too many or too few neighbours.
"""
remaining_count = self._get_remaining_count()
_, axes = plt.subplots(1, 2, figsize=figure_size)
hist = sns.histplot(remaining_count, ax=axes[0], **kwargs)
# axes[0].set_xlim((0,max_value))
hist.set(
title="Histogram of number of remaining neighbours per frame",
xlabel="Number of neighbours",
ylabel="Number of frames",
)
dlist = self.distance_list
dist_list = []
for d, _ in dlist:
dist_list.extend(d[1:])
hist = sns.histplot(dist_list, ax=axes[1])
hist.set(
title="Histogram of neighbour distances",
ylabel="Number of neighbours",
xlabel="Neighbour distance",
)
plt.xticks(rotation=45)
return hist
def print_zero_remaining_neighbours(self):
"""Generate a dataframe containing per-video statistics of frames with zero neighbours
Returns:
pandas dataframe
"""
zero_remaining_neighbour_videos = {}
remaining_count = self._get_remaining_count()
for i, count in enumerate(remaining_count):
if count == 0:
video_name = self.frames.get_video_name(self.frames.all_images[i])
try:
zero_remaining_neighbour_videos[video_name] += 1
                except KeyError:
zero_remaining_neighbour_videos[video_name] = 1
print(" Count: Video name:")
        df = pd.DataFrame(columns=["Frames with no neighbours", "Video name"])
        for vid, count in zero_remaining_neighbour_videos.items():
            df = df.append(
                {"Frames with no neighbours": count, "Video name": vid}, ignore_index=True
            )
return df
def get_video_match_statistics(self, distance_threshold=20000):
"""Creates and returns a dataframe containing the statistics for each video
The output includes total valid matches, total amount of frames and video name for each video.
Args:
distance_threshold (int, optional): The maximum allowed threshold for distance. Defaults to 20000.
"""
video_list = list(self.frames.cached_video_index.keys())
if distance_threshold < 0:
# TODO: Verify if this int max is enough
distance_threshold = 9223372036854775807
dlist_sorted = sorted(self.distance_list, key=lambda x: x[1][0]) # Sort index
dlist_indexes = np.array([x[1][0] for x in dlist_sorted])
dict_list = []
columns = [
"Video name",
"Original no. of frames",
"Remaining no. of frames",
"Remaining no. of neighbours",
"Average no. of neighbours",
]
for vid in video_list:
vid_indexes = self.frames.get_index_from_video_name(vid)
if vid_indexes is None or len(vid_indexes) == 0:
print(f"No indexes for video {vid}, continuing...")
continue
vid_index_min = min(vid_indexes)
vid_index_max = max(vid_indexes)
try:
vid_loc = np.where(
(dlist_indexes >= vid_index_min) & (dlist_indexes <= vid_index_max)
)[0]
except:
print(f"Error finding location of video {vid}, continuing...")
continue
if len(vid_loc) == 0:
# Video has likely been completely filtered from the distance list.
dict_list.append(
{x: y for x, y in zip(columns, [vid, len(vid_indexes), 0, 0, 0])}
)
continue
vid_dlist = dlist_sorted[vid_loc[0] : vid_loc[-1]]
# This checks for number of matches where distance < distance_threshold
vid_distances = [
(np.sum(np.array(x) < distance_threshold) - 1) for x, y in vid_dlist
]
if len(vid_distances) == 0:
mean_dist = 0
else:
mean_dist = np.mean(vid_distances)
dict_list.append(
{
x: y
for x, y in zip(
columns,
[
vid,
len(vid_indexes),
len(vid_distances),
np.sum(vid_distances),
mean_dist,
],
)
}
)
return pd.DataFrame(dict_list, columns=columns)
def filter_monochrome_images(self, pool_size=6, allowed_difference=40):
"""Filters any images that have a "difference" lower than the specified threshold.
This is intended to remove frames that are of a solid color.
For details see get_monochrome_images.
Args:
pool_size (int, optional): [description]. Defaults to 6.
allowed_difference (int, optional): [description]. Defaults to 40.
"""
image_indexes = self.get_monochrome_frames(
pool_size=pool_size, allowed_difference=allowed_difference
)
self.remove_indexes_from_distance_list(image_indexes)
return self
# print(f'Removed {len(image_indexes)} images from the distance list.')
def get_monochrome_frames(self, pool_size=6, allowed_difference=40):
"""Get the indexes (as determined by all_images) of all images with largely the same color.
This function is threaded to avoid bottlenecks.
For details, see image_is_same_color.
Args:
distance_list ([type]): The distance list used to look for relevant images
all_images ([type]): An ordered list of all possible images
pool_size (int, optional): How many threads to used. Defaults to 6.
allowed_difference (int, optional): Allowed difference in the images. Defaults to 40.
Returns:
set: A set containing the indexes (corresponding to the location in all_images) of images deemed to have a single color
"""
pool = Pool(pool_size)
# remaining indexes after filtering
relevant_indexes = [x[1][0] for x in self.distance_list]
relevant_files = [self.frames.all_images[x] for x in relevant_indexes]
try:
pool_result = pool.starmap(
is_monochrome, zip(relevant_files, repeat(allowed_difference))
)
finally:
pool.close()
pool.join()
# Create array to enable indexing; create set as it is much faster when looking up later
monochrome_frames = set(np.array(relevant_indexes)[np.nonzero(pool_result)[0]])
return monochrome_frames
def remove_indexes_from_distance_list(self, image_indexes: set):
"""Remove the indexes defined in the image_indexes set from the distance_list.
        This means that they are removed both when they are the original (reference) image,
and also when they are the neighbour.
Image indexes is a set as it's faster.
Args:
distance_list (list): The faiss distance list to remove from
image_indexes (set): A set of indexes to remove
Returns:
[list]: A distance list with the specified image indexes removed.
"""
dlist = []
for d, i in self.distance_list:
if i[0] in image_indexes:
continue
new_i = []
new_d = []
for distance, index in zip(d, i):
if index in image_indexes:
continue
new_d.append(distance)
new_i.append(index)
if len(new_d) > 1:
dlist.append((new_d, new_i))
self.distance_list = dlist
return self
def filter_neighbours_in_same_subfolder(self, pool_size=-1, batch_size=-1):
"""Filter neighbours that are from the same subdirectory as the source frame."""
new_distance_list = []
if pool_size < 1:
pool_size = os.cpu_count()
if pool_size is None:
pool_size = 1
pool = Pool(pool_size)
if batch_size <= 0:
# Default value, just split it equally in one batch per thread.
batch_size = math.floor(len(self.distance_list) / pool_size + 1)
# Step 1: Calculate all subdirs
new_iter = (
[x, self.frames.frame_directory]
for x in self._batch(self.frames.all_images, batch_size)
)
subdirs = {}
for img_subdir in pool.imap_unordered(self._get_subdir_for_frame, new_iter):
for img, subdir in img_subdir:
subdirs[img] = subdir
# print(f'Got {len(subdirs.keys())} subdirs...')
new_iter = (
[x, self.frames.all_images, subdirs]
for x in self._batch(self.distance_list, batch_size)
)
new_distance_list = []
# NOTE: This was actually slower using multiprocessing pools, likely because too much was being serialized.
# Reconsider if required, but this approach now is fairly efficient due to precalculations.
for work in new_iter:
partial_dlist = self._same_subdir_fun(work)
new_distance_list.extend(partial_dlist)
self.distance_list = new_distance_list
return self
def filter_maximum_distance(self, max_distance):
"""Filters all neighbours above the specified maximum distance
Args:
max_distance ([int]): The maximum distance as a number
Returns:
self
"""
# TODO: Use slices instead, saves memory?
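        # NOTE: the early break below assumes each neighbour list is sorted by ascending
        # distance, as produced by the nearest-neighbour search.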
new_dlist = []
for d, i in self.distance_list:
new_d = []
new_i = []
for _d, _i in zip(d, i):
if _d > max_distance:
break
new_d.append(_d)
new_i.append(_i)
if len(new_d) > 1:
new_dlist.append((new_d, new_i))
self.distance_list = new_dlist
return self
@staticmethod
def _get_subdir_for_frame(args):
images, frame_directory = args
# TODO: THIS IS NOT CORRECT; FIX! THIS REFERENCES A CLASS METHOD
return [
(img, FrameExtractor.get_subdir_for_frame_static(img, frame_directory))
for img in images
]
# as per https://stackoverflow.com/a/8290508
@staticmethod
def _batch(iterable, batch_size=1):
iter_length = len(iterable)
for ndx in range(0, iter_length, batch_size):
yield iterable[ndx : min(ndx + batch_size, iter_length)]
@staticmethod
def _same_subdir_fun(args):
"""Function for calculating if neighoburs are from the same subdirectory.
This is used by the pool function to allow multiprocessing and should not be directly called!
"""
d_is, all_images, subdirs = args
new_dlist = []
for d, i in d_is:
ref_subdirectory = subdirs[all_images[i[0]]]
if ref_subdirectory is None:
continue
# always add reference first (as it will be removed at it has the same subdir as itself)
cleaned_di = list([(d[0], i[0])])
for _d, _i in zip(d, i):
if subdirs[all_images[_i]] != ref_subdirectory:
cleaned_di.append((_d, _i))
if len(cleaned_di) < 2:
continue
new_di = ([x[0] for x in cleaned_di], [x[1] for x in cleaned_di])
new_dlist.append(new_di)
# Convert to correct form of one tuple of two lists
return new_dlist
def filter_neighbours(self, filter_list: list, create_copy=True):
"""Applies the specified functions to filter the list of self.
Args:
filter_list (list): A list of the filters, essentially separate functions
create_copy (bool, optional): Whether or not to create a copy of the neighbour list.
Returns:
The filtered list
"""
if filter_list is None:
return self
if create_copy:
return_copy = self.copy()
else:
return_copy = self
for filter in filter_list:
return_copy = filter(return_copy)
return return_copy
def _find_distance_between_two_frames(self, frame_1, frame_2):
dlist = self.distance_list
f1_d, f1_i = next(
iter([(d, i) for d, i in dlist if i[0] == frame_1]), (None, None)
)
f2_d, f2_i = next(
iter([(d, i) for d, i in dlist if i[0] == frame_2]), (None, None)
)
# Consider if making the distance negative makes sense if it isn't found...
if f1_d is None or f2_d is None:
return (-1, -1)
f1_dist = -1
f2_dist = -1
for d, i in zip(f1_d, f1_i):
if i == frame_2:
f1_dist = d
break
for d, i in zip(f2_d, f2_i):
if i == frame_1:
f2_dist = d
break
return (f1_dist, f2_dist)
def get_top_frames(self, number_to_return=20) -> pd.DataFrame:
"""Get the top frames ith most remaining matching self.
The information includes: Video name, timestamp, and number of self.
Args:
number_to_return (int, optional): [description]. Defaults to 20.
Returns:
DataFrame: A dataframe with the results
"""
dlist = self.distance_list
frames = self.frames
index_and_count = [(idx, len(i) - 1) for idx, (d, i) in enumerate(dlist)]
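        # len(i) - 1 excludes the reference frame itself from the neighbour count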
sort = sorted(index_and_count, key=lambda idx_count: idx_count[1], reverse=True)
to_return = sort[0:number_to_return]
return_list = []
for idx, count in to_return:
vid, start_time = frames.get_video_and_start_time_from_index(idx)
return_list.append(
(vid, start_time, self._secs_to_timestamp(start_time), count)
)
df = pd.DataFrame(
return_list,
columns=[
"Video name",
"Frame (S)",
"Frame (HH:MM:SS)",
"Remaining neighbours",
],
)
df.index += 1
# df = df.style.set_properties(**{'text-align': 'left'})
return df
@staticmethod
def _secs_to_timestamp(secs):
return time.strftime("%H:%M:%S", time.gmtime(secs))
def get_frames_with_closest_distance(self, number_to_return=10):
"""Get frames with closest distances to other frames.
        This automatically removes duplicates, i.e. if both frame1:frame2 and frame2:frame1 are present.
Args:
number_to_return (int, optional): [description]. Defaults to 10.
Returns:
[type]: [description]
"""
frames = self.frames
dlist = self.distance_list
return_list = []
for d, i in dlist:
ref_index = i[0]
return_list.extend([(ref_index, dd, ii) for dd, ii in zip(d[1:], i[1:])])
return_list = sorted(return_list, key=lambda x: x[1], reverse=False)
# Add check to ensure vid1:vid2 and vid2:vid1 are not present at the same time
indexes_to_remove = []
last_distance = -1
for list_index, (vid1_idx, dist, vid2_idx) in enumerate(return_list):
if dist == last_distance:
last_vid1_idx, _, last_vid2_idx = return_list[list_index - 1]
if last_vid2_idx == vid1_idx and last_vid1_idx == vid2_idx:
indexes_to_remove.append(list_index)
last_distance = dist
return_list = [
x for i, x in enumerate(return_list) if i not in indexes_to_remove
]
if len(return_list) > number_to_return:
return_list = return_list[0:number_to_return]
columns = [
"Video 1 name",
"Video 1 frame (S)",
"Video 1 frame (HH:MM:SS)",
"Video 2 name",
"Video 2 frame (S)",
"Video 1 frame (HH:MM:SS)",
"Distance",
]
df_list = []
for v1_index, distance, v2_index in return_list:
vid1, frame_start1 = frames.get_video_and_start_time_from_index(v1_index)
vid2, frame_start2 = frames.get_video_and_start_time_from_index(v2_index)
ts1 = self._secs_to_timestamp(frame_start1)
ts2 = self._secs_to_timestamp(frame_start2)
df_list.append(
{
k: v
for k, v in zip(
columns,
[vid1, frame_start1, ts1, vid2, frame_start2, ts2, distance],
)
}
)
df = pd.DataFrame(df_list, columns=columns)
df.index += 1
# df_styler = df.style.set_properties(**{'text-align': 'left'})
# df_styler.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
return df
# class BuiltInFilters:
# def __only_from_video(self: Neighbours, video_name):
# distances, indexes = self.distance_list
# allowed_indexes = set(self.frames.get_index_from_video_name(video_name))
# new_distance_list = []
# # I consider this more readable than the list comprehension variant...
# for _d, _i in zip(distances, indexes):
# if _i[0] in allowed_indexes:
# new_distance_list.append((_d, _i))
# self.distance_list = new_distance_list
# return self
# def only_from_video(video_name):
# return lambda self: BuiltInFilters.__only_from_video(self, video_name)
class FilterChange:
"""Show changes in statistics compared to the initial state.
Returns:
[type]: [description]
"""
orig_frame_count: int
curr_frame_count: int
reference_distance_list: list
def __init__(self, neighbours: Neighbours) -> None:
self.orig_frame_count = len(neighbours.frames.all_images)
self.curr_frame_count = len(neighbours.distance_list)
self.reference_distance_list = deepcopy(neighbours.distance_list)
def per_video_comparison(self, neighbours_new: Neighbours) -> pd.DataFrame:
"""Compares the initial state to the new state.
        Used to show what impact a filtering step had.
        This function provides this information per video:
        Video name, total frames, lost frames, remaining frames, average number of neighbours before, average number of neighbours after
        Args:
            neighbours_new ([Neighbours]): The new neighbours state, generally after filtering
Returns: A pandas dataframe containing the statistics.
"""
df = pd.DataFrame(
columns=[
"Video name",
"Total frames",
"Lost frames",
"Remaining frames",
"Avg. self before",
"Avg. self after",
]
)
for video, video_indexes in neighbours_new.frames.cached_video_index.items():
ref_dlist = [
(d, i) for d, i in self.reference_distance_list if i[0] in video_indexes
]
new_dlist = [
(d, i) for d, i in neighbours_new.distance_list if i[0] in video_indexes
]
avg_neighbours_ref = 0
avg_neighbours_new = 0
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
avg_neighbours_ref = np.mean([len(i) - 1 for d, i in ref_dlist])
avg_neighbours_new = np.mean([len(i) - 1 for d, i in new_dlist])
total_frames = len(video_indexes)
lost_frames = len(ref_dlist) - len(new_dlist)
df = df.append(
{
"Video name": video,
"Total frames": total_frames,
"Lost frames": lost_frames,
"Remaining frames": len(new_dlist),
"Avg. self before": avg_neighbours_ref,
"Avg. self after": avg_neighbours_new,
},
ignore_index=True,
)
return df
def changes_in_short(self, neighbours) -> pd.DataFrame:
"""
Total frames:
Remaining Frames (before)
Remaining Frames (after)
Lost frames
Average neighbours (before)
Average neighbours (after)
Removed percentage
Args:
neighbours ([type]): [description]
Returns:
pd.DataFrame: [description]
"""
df = pd.DataFrame(columns=["Before this filter", "After this filter"])
def create_row(vals, name):
return pd.Series({k: v for k, v in zip(df.columns, vals)}, name=name)
remaining_before = self.curr_frame_count
remaining_after = len(neighbours.distance_list)
df = df.append(
create_row([remaining_before, remaining_after], "Remaining frames")
)
lost_frames = remaining_before - remaining_after
df = df.append(create_row([np.NaN, lost_frames], "Lost frames (number)"))
removed_percentage = (lost_frames / remaining_before) * 100
df = df.append(
create_row([np.NaN, removed_percentage], "Lost frames (percentage)")
)
avg_neighbours_before = np.sum(
[len(i) - 1 for d, i in self.reference_distance_list]
) / len(self.reference_distance_list)
avg_neighbours_after = np.sum(
[len(i) - 1 for d, i in neighbours.distance_list]
) / len(neighbours.distance_list)
df = df.append(
create_row(
[avg_neighbours_before, avg_neighbours_after],
"Average no. of neighbours",
)
)
# Calculate average distances
def get_average_distance(dlist):
distances = [(len(d) - 1, np.sum(d[1:])) for d, i in dlist]
if len(distances) == 0:
return -1
tot_count, tot_distance = functools.reduce(
lambda d1, d2: (d1[0] + d2[0], d1[1] + d2[1]), distances
)
return tot_distance / tot_count
avg_distance_before = get_average_distance(self.reference_distance_list)
avg_distance_after = get_average_distance(neighbours.distance_list)
df = df.append(
create_row(
[
avg_distance_before,
avg_distance_after if avg_distance_after > -1 else "-",
],
"Average distance metrics",
)
)
df[df.columns[0]] = df[df.columns[0]].apply(lambda v: np.round(v, 2))
df = df.fillna("-")
return df
def overall_comparison(self, neighbours_new: Neighbours) -> pd.DataFrame:
"""Compares two neighbours to see how much has been changed."""
df = | pd.DataFrame(columns=["Total frames", "Lost frames", "Remaining frames"]) | pandas.DataFrame |
import pandas as pd
from sklearn.metrics.pairwise import haversine_distances
from math import radians, degrees
from decouple import config
import numpy as np
from datetime import datetime, timedelta
from models.reoptimization_config import *
from main_config import *
# Sets
num_nodes = 2 * n
num_nodes_and_depots = (
2 * num_vehicles + 2 * n
) # num_vehicles is fetched from reopt config
# Costs and penalties
C_D = 1 # per vehicle
C_F = 60
C_T = 60
# Capacity per vehicle
Q_S = 5
Q_W = 1
# Allowed excess ride time
F = 0.5
# Weighting of Operational Costs vs Quality of Service
alpha = 0.5
# Different parameters per node
df = pd.read_csv(initial_events_path, nrows=n)
# Load for each request
L_S = df["Number of Passengers"].tolist()
L_W = df["Wheelchair"].tolist()
# Lat and lon for each request
origin_lat_lon = list(zip(np.deg2rad(df["Origin Lat"]), np.deg2rad(df["Origin Lng"])))
destination_lat_lon = list(
zip(np.deg2rad(df["Destination Lat"]), np.deg2rad(df["Destination Lng"]))
)
request_lat_lon = origin_lat_lon + destination_lat_lon
# Positions in degrees
origin_lat_lon_deg = list(zip(df["Origin Lat"], df["Origin Lng"]))
destination_lat_lon_deg = list(zip(df["Destination Lat"], df["Destination Lng"]))
request_lat_lon_deg = origin_lat_lon_deg + destination_lat_lon_deg
vehicle_lat_lon = []
vehicle_lat_lon_deg = []
# Origins for each vehicle
for i in range(num_vehicles):
vehicle_lat_lon.append((radians(59.946829115276145), radians(10.779841653639243)))
vehicle_lat_lon_deg.append((59.946829115276145, 10.779841653639243))
# Destinations for each vehicle
for i in range(num_vehicles):
vehicle_lat_lon.append((radians(59.946829115276145), radians(10.779841653639243)))
vehicle_lat_lon_deg.append((59.946829115276145, 10.779841653639243))
# Positions
lat_lon = request_lat_lon + vehicle_lat_lon
Position = request_lat_lon_deg + vehicle_lat_lon_deg
# Distance matrix
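# haversine_distances expects (lat, lon) in radians and returns great-circle angles on the
# unit sphere; multiplying by the Earth's mean radius (~6371 km) converts them to kilometres.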
D_ij = haversine_distances(lat_lon, lat_lon) * 6371
# Travel time matrix
speed = 40
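# speed is in km/h, so D_ij / speed is the travel time in hours; the timedelta round-trip
# below simply re-expresses that value as fractional hours.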
T_ij = np.empty(shape=(num_nodes_and_depots, num_nodes_and_depots), dtype=timedelta)
for i in range(num_nodes_and_depots):
for j in range(num_nodes_and_depots):
T_ij[i][j] = timedelta(hours=(D_ij[i][j] / speed)).total_seconds() / 3600
# Time windows
T_S_L = pd.to_datetime(df["T_S_L_P"]).tolist() + pd.to_datetime(df["T_S_L_D"]).tolist()
T_S_U = pd.to_datetime(df["T_S_U_P"]).tolist() + pd.to_datetime(df["T_S_U_D"]).tolist()
T_H_L = pd.to_datetime(df["T_H_L_P"]).tolist() + | pd.to_datetime(df["T_H_L_D"]) | pandas.to_datetime |
import pytest
import pandas as pd
from pandas.testing import assert_series_equal
from yeast import Recipe
from yeast.steps import JoinStep, SortStep, RenameColumnsStep
from yeast.errors import YeastValidationError
from tests.data_samples import startrek_starships
from tests.data_samples import startrek_starships_specs
def test_join_on_left_step(startrek_starships, startrek_starships_specs):
"""
    Left Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="left"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NX-01', 'name': 'Enterprise', 'warp': None}, name=4)
assert_series_equal(baked_data.loc[4], row)
def test_join_on_inner_step(startrek_starships, startrek_starships_specs):
"""
    Inner Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="inner"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_right_step(startrek_starships, startrek_starships_specs):
"""
    Right Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="right"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_fullouter_step(startrek_starships, startrek_starships_specs):
"""
    Full outer Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="full"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NX-01', 'name': 'Enterprise', 'warp': None}, name=4)
| assert_series_equal(baked_data.loc[4], row) | pandas.testing.assert_series_equal |
"""Module containing local database functions."""
import os
from pathlib import Path
import shutil
import math
import zipfile
from datetime import datetime
from typing import List
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import requests
import pandas as pd
import numpy as np
from . import config as c
URL_DFP = "http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/DFP/DADOS/"
URL_ITR = "http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/ITR/DADOS/"
RAW_DIR = c.DATA_PATH / "raw"
PROCESSED_DIR = c.DATA_PATH / "processed"
def list_urls() -> List[str]:
"""Update the CVM Portal file base.
Urls with CVM raw files:
http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/DFP/DADOS/
http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/ITR/DADOS/
Links example:
http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/DFP/DADOS/dfp_cia_aberta_2020.zip
http://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/ITR/DADOS/itr_cia_aberta_2020.zip
Throughout 2021, there are already DFs for the year 2022 because the
company's social calendar may not necessarily coincide with the official
calendar. Because of this, 2 is added to the current year (the second limit
of the range function is exclusive)
"""
    first_year = 2010  # First year available at CVM Portal.
# Next year files will appear during current year.
last_year = pd.Timestamp.now().year + 1
years = list(range(first_year, last_year + 1))
first_year_itr = last_year - 3
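    # Annual (DFP) files are listed for every year, while quarterly (ITR) files are only
    # listed for the most recent years (from first_year_itr onwards).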
urls = []
for year in years:
filename = f"dfp_cia_aberta_{year}.zip"
url = f"{URL_DFP}{filename}"
urls.append(url)
if year >= first_year_itr:
filename = f"itr_cia_aberta_{year}.zip"
url = f"{URL_ITR}{filename}"
urls.append(url)
return urls
def update_raw_file(url: str) -> Path:
"""Update file from CVM portal. Return a Path object if file is updated."""
raw_path = Path(RAW_DIR, url[-23:]) # filename = url final
with requests.Session() as s:
r = s.get(url, stream=True)
if r.status_code != requests.codes.ok:
return None
if Path.exists(raw_path):
local_file_size = raw_path.stat().st_size
else:
local_file_size = 0
url_file_size = int(r.headers["Content-Length"])
if local_file_size == url_file_size:
return None
with raw_path.open(mode="wb") as f:
f.write(r.content)
# Timestamps
ts_gmt = | pd.to_datetime(r.headers["Last-Modified"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import os
from sneratio.src.lib import info
from sneratio.src.lib import utils
class Data:
Ia_table = None
cc_table = None
solar_table = None
mass_number_table = None
input_data = None
elements = None
ref_element = None
ref_row_index = None
cc_table_integral_steps = 250
merged_table = None
@staticmethod
def read_data(path, with_header=True, sep="\s+"):
if not os.path.exists(path):
raise FileNotFoundError("{}".format(path))
if with_header is True:
data = pd.read_csv(path, sep=sep)
else:
data = pd.read_csv(path, sep=sep, header=None)
return data
@staticmethod
def read_Ia_table():
Ia_table_path = info.get_selected_option_path("Ia_table")
Data.Ia_table = Data.read_data(Ia_table_path)
@staticmethod
def read_cc_table():
cc_table_path = info.get_selected_option_path("cc_table")
Data.cc_table = Data.read_data(cc_table_path)
@staticmethod
def read_solar_table():
solar_table_path = info.get_selected_option_path("solar_table")
Data.solar_table = Data.read_data(solar_table_path)
@staticmethod
def read_mass_number_table():
mass_number_table_path = info.get_selected_option_path("mass_number_table")
Data.mass_number_table = Data.read_data(mass_number_table_path)
@staticmethod
def read_input_data():
input_data_path = info.get_selected_option_path("input_data")
Data.input_data = Data.read_data(input_data_path)
@staticmethod
def read_all_data():
Data.read_Ia_table()
Data.read_cc_table()
Data.read_solar_table()
Data.read_mass_number_table()
Data.set_input_data()
@staticmethod
def print_all_info():
print(Data.Ia_table.info())
print(Data.cc_table.info())
print(Data.solar_table.info())
print(Data.mass_number_table.info())
print(Data.input_data.info())
@staticmethod
def set_input_data():
element_list = info.elements_dict["element"]
abund_list = [float(a) for a in info.elements_dict["abund"]]
abund_error_list = [float(a) for a in info.elements_dict["abund_err"]]
columns = ["Element", "Abund", "AbundError"]
Data.input_data = pd.DataFrame([*zip(element_list, abund_list, abund_error_list)], columns=columns)
@classmethod
def set_elements_from_input_data(cls):
if cls.input_data is not None:
cls.elements = cls.input_data.iloc[:, 0].unique()
else:
raise ValueError(f"Data.input_data is None!")
@classmethod
def set_ref_element(cls):
cls.ref_element = info.get_selected_option("ref_element")
@classmethod
def set_ref_row_index(cls):
try:
cls.ref_row_index = np.where(cls.elements == cls.ref_element)[0][0]
except:
raise ValueError
@classmethod
def initialise_merged_table(cls):
cls.merged_table = pd.DataFrame(cls.elements, columns=["Element"])
@classmethod
def normalise_input_data(cls):
ref_element = info.get_selected_option("ref_element")
if ref_element != "H":
ref_row = cls.input_data["Element"] == ref_element
cls.ref_row_index = list(ref_row).index(True)
ref_value_ratio = cls.input_data[ref_row]["Abund"]
ref_value_ratio_err = cls.input_data[ref_row]["AbundError"]
if float(ref_value_ratio) == 1.0:
ref_value_ratio_err = float(ref_value_ratio) * 0.001
if float(ref_value_ratio_err) == 0.0:
ref_value_ratio_err = float(ref_value_ratio) * 0.001
normalised_values = []
for r, r_err in zip(cls.input_data["Abund"], cls.input_data["AbundError"]):
normalised_values.append(utils.division_error(r, r_err, ref_value_ratio, ref_value_ratio_err))
normalised_ratio, normalised_ratio_err = zip(*normalised_values)
cls.merged_table["Abund"] = normalised_ratio
cls.merged_table["AbundError"] = normalised_ratio_err
else:
cls.merged_table["Abund"] = cls.input_data["Abund"]
cls.merged_table["AbundError"] = cls.input_data["AbundError"]
@classmethod
def normalise_solar_table(cls):
ref_element = info.get_selected_option("ref_element")
ref_row = cls.solar_table["Element"] == ref_element
ref_value = cls.solar_table[ref_row]["Solar"]
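        # "Solar" values are treated as log10 abundances: 10 ** (value - reference)
        # gives the linear abundance ratio relative to the chosen reference element.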
temp_list = 10.0 ** (cls.solar_table["Solar"] - ref_value.values[0])
column_name = "{}_normalised_solar".format(ref_element)
cls.solar_table[column_name] = temp_list
filt = cls.solar_table["Element"].isin( cls.elements)
cls.merged_table["Solar"] = cls.solar_table[filt][column_name].values
@classmethod
def get_Ia_model(cls):
try:
model = cls.Ia_table.columns[2:].values[0]
return model
except:
            raise IndexError("Ia model couldn't be read!")
@classmethod
def set_Ia_yields(cls):
cls.Ia_yields = | pd.DataFrame() | pandas.DataFrame |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextParser
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = | DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) | pandas.DataFrame |
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatability
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_almost_equal
from eland.tests.common import TestData
class TestSeriesFrameHist(TestData):
def test_flight_delay_min_hist(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
num_bins = 10
# pandas data
pd_flightdelaymin = np.histogram(pd_flights["FlightDelayMin"], num_bins)
pd_bins = | pd.DataFrame({"FlightDelayMin": pd_flightdelaymin[1]}) | pandas.DataFrame |
import unittest
import saspy
import pandas as pd
import tempfile
import os
TEST_DATA = """
data testdata;
format d1 date. dt1 datetime.;
d1 = '03Jan1966'd; dt1 = '03Jan1966:13:30:59.000123'dt; output;
d1 = '03Jan1967'd; dt1 = '03Jan1966:13:30:59.990123'dt; output;
d1 = '03Jan1968'd; dt1 = '03Jan1966:13:30:59'dt; output;
d1 = '03Nov1966'd; dt1 = '03Jan1966:13:30:00.000126'dt; output;
d1 = '04Jan1966'd; dt1 = '03Jan1966:13:30:59'dt; output;
run;
"""
class TestPandasDataFrameIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sas = saspy.SASsession()
cls.sas.set_batch(True)
cls.sas.submit(TEST_DATA)
cls.test_data = cls.sas.sasdata('testdata', results='text')
@classmethod
def tearDownClass(cls):
cls.sas._endsas()
def test_pandas_sd2df_instance(self):
"""
Test method sasdata2dataframe returns a pandas.DataFrame
"""
df = self.test_data.to_df()
self.assertIsInstance(df, pd.DataFrame)
def test_pandas_sd2df_values(self):
"""
Test method sasdata2dataframe returns a pandas.DataFrame containing
the correct data.
"""
EXPECTED = ['0', '1966-01-03', '1966-01-03', '13:30:59.000123']
df = self.test_data.to_df()
result = df.head()
# FIXME: May be more robust to compare actual data structures.
rows = result.to_string().splitlines()
retrieved = [x.split() for x in rows]
self.assertIn(EXPECTED, retrieved, msg="df.head() result didn't contain row 1")
def test_pandas_df2sd_instance(self):
"""
Test method dataframe2sasdata properly writes.
"""
df = self.test_data.to_df()
td2 = self.sas.df2sd(df, 'td2', results='text')
self.assertIsInstance(td2, saspy.sasdata.SASdata)
def test_pandas_df2sd_values(self):
"""
Test method dataframe2sasdata properly writes the correct values.
"""
EXPECTED = ['1', '1966-01-03T00:00:00.000000', '1966-01-03T13:30:59.000123']
df = self.test_data.to_df()
td2 = self.sas.df2sd(df, 'td2', results='text')
ll = td2.head()
# FIXME: May be more robust to compare actual data structures.
rows = ll['LST'].splitlines()
retrieved = [x.split() for x in rows]
self.assertIn(EXPECTED, retrieved, msg="td2.head() result didn't contain row 1")
def test_pandas_sd2df_csv_instance(self):
"""
Test method sasdata2dataframe using `method=csv` returns a
pandas.DataFrame.
"""
df = self.test_data.to_df_CSV()
self.assertIsInstance(df, pd.DataFrame)
def test_pandas_sd2df_csv_values(self):
"""
Test method sasdata2dataframe using `method=csv` returns a
pandas.DataFrame containing the correct values.
"""
EXPECTED = ['0', '1966-01-03', '1966-01-03', '13:30:59.000123']
df = self.test_data.to_df_CSV()
result = df.head()
# FIXME: May be more robust to compare actual data structures.
rows = result.to_string().splitlines()
retrieved = [x.split() for x in rows]
self.assertIn(EXPECTED, retrieved, msg="df.head() result didn't contain row 1")
def test_pandas_sd2df_csv_tempfile_instance(self):
"""
Test method sasdata2dataframe using `method=csv` and argument
`tempfile=...` returns a pandas.DataFrame.
"""
tmpdir = tempfile.TemporaryDirectory()
tmpcsv = os.path.join(tmpdir.name, 'tomodsx')
df = self.test_data.to_df_CSV(tempfile=tmpcsv)
tmpdir.cleanup()
self.assertIsInstance(df, pd.core.frame.DataFrame)
def test_pandas_sd2df_csv_tempfile_values(self):
"""
Test method sasdata2dataframe using `method=csv` and argument
`tempfile=...` returns a pandas.DataFrame containing the correct
values.
"""
EXPECTED = ['0', '1966-01-03', '1966-01-03', '13:30:59.000123']
tmpdir = tempfile.TemporaryDirectory()
tmpcsv = os.path.join(tmpdir.name, 'tomodsx.csv')
df = self.test_data.to_df_CSV(tempfile=tmpcsv)
result = df.head()
rows = result.to_string().splitlines()
retrieved = [x.split() for x in rows]
tmpdir.cleanup()
self.assertIn(EXPECTED, retrieved, msg="df.head() result didn't contain row 1")
def test_pandas_sd2df_csv_tempfile_tempkeep_true(self):
"""
Test method sasdata2dataframe using `method=csv` and arguments
`tempfile=..., tempkeep=True` retains the temporary CSV file in the
provided location.
"""
tmpdir = tempfile.TemporaryDirectory()
tmpcsv = os.path.join(tmpdir.name, 'tomodsx.csv')
df = self.test_data.to_df_CSV(tempfile=tmpcsv, tempkeep=True)
if self.sas.sascfg.mode == 'IOM':
self.assertTrue(os.path.isfile(tmpcsv))
tmpdir.cleanup()
def test_pandas_sd2df_csv_tempfile_tempkeep_false(self):
"""
Test method sasdata2dataframe using `method=csv` and arguments
`tempfile=..., tempkeep=False` does not retain the temporary CSV file
in the provided location.
"""
tmpdir = tempfile.TemporaryDirectory()
tmpcsv = os.path.join(tmpdir.name, 'tomodsx.csv')
df = self.test_data.to_df_CSV(tempfile=tmpcsv, tempkeep=False)
self.assertFalse(os.path.isfile(tmpcsv))
tmpdir.cleanup()
def test_sd2df_DISK(self):
"""
        Test method sasdata2dataframe using `method=disk`.
"""
data = [
[442.5, '"quoted\x01 string"', 'non\t\tquoted string',44.4,'"leading quote string', '"leading"and embed\x0Aded string','''"all' "over' 'the "place"''',0],
[132.5, '"quoted\x02 string"', 'non quoted string', 41.4,'"leading quote string', '"leading"and embed\x0Dded string','''"all' "over' 'the "place"''',20.7],
[242.5, '"quoted\x03 string"', 'non quoted string', 42.4,'"leading\t\t quote string','"leading"and embed\x0Aded string','''"all' "over' 'the "place"''',20.8],
[342.5, '"quoted\x02 string"', '', 43.4,'"leading quote string', '"leading"and embed\x0Dded string','''"all' "over' 'the "place"''',10.9],
[342.5, "'quoted\x01 string'", 'non quoted string', 43.4,'''"leading'quote string''','"leading"and embed\x0Adedstring"','''"all' "over' 'the "place"''',10.9],
]
df = pd.DataFrame(data)
sd = self.sas.df2sd(df, 'quotes')
df2 = sd.to_df()
df3 = sd.to_df_DISK()
self.assertTrue(df2.shape == (5, 8))
self.assertTrue(df3.shape == (5, 8))
self.assertFalse(False in (df2 == df3))
cars = self.sas.sasdata('cars','sashelp', results='text')
df = cars.to_df_DISK(colsep='A', rowsep='E', colrep='"', rowrep='?')
self.assertTrue(df.shape == (428, 15))
class TestPandasValidVarname(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sas = saspy.SASsession()
test_dict = {'Salary 2018': [1],
'2019_Salary $(USD)': [1],
'Really_Long_Variable_Name_To_Shorten': [1],
'Really Long Variable Name To Shorten': [1]}
duplicate_dict = {'My String!abc' : [0], 'My String@abc' : [1],
'My String#abc' : [2], 'My String$abc' : [3],
'My String%abc': [4], 'My String^abc' : [5],
'My String&abc' :[6], 'My String*abc' : [7],
'My String(abc' :[8], 'My String)abc' :[9]}
cls.duplicate_data = | pd.DataFrame.from_dict(duplicate_dict) | pandas.DataFrame.from_dict |
import pandas as pd
import numpy as np
from keras.callbacks import EarlyStopping
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from scipy.special import digamma
from numpy import linalg as LA
from feature_based.multiclass_opencrowd.nn_em import nn_em
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
import random
from feature_based.multiclass_opencrowd import arguments
NUMBER_OF_LABELS = 0
LABEL_INDEX = []
def init_probabilities(n_infls):
# initialize probability z_i (item's quality) randomly
qz = (1.0/NUMBER_OF_LABELS) * np.ones((n_infls, NUMBER_OF_LABELS))
# initialize probability alpha beta (worker's reliability)
A = 2
B = 2
return qz, A, B
def init_alpha_beta(A, B, n_workers):
alpha = np.zeros((n_workers, 1),dtype='float32')
beta = np.zeros((n_workers, 1),dtype='float32')
for w in range(0, n_workers):
alpha[w] = A
beta[w] = B
return alpha, beta
def e_step(y_train, n_workers, q_z_i, annotation_matrix, alpha, beta, theta_i,true_labels,new_order,y_val,start_val,end_val,new_alpha_value,max_it=20):
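    # Variational E-step: alternately update the item posteriors q(z_i) and the worker
    # reliability posteriors q(r_j) ~ Beta(alpha_j, beta_j).
    # For q(z): a worker answer matching label k scales that component by
    # exp(digamma(alpha_j) - digamma(alpha_j + beta_j)), while the other labels are scaled
    # by exp(digamma(beta_j) - digamma(alpha_j + beta_j)); the result is then renormalised.
    # For q(r): alpha_j / beta_j accumulate the soft counts of agreements / disagreements.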
old_q_z_i = theta_i.copy()
old_alpha = alpha.copy()
old_beta = beta.copy()
diff = []
train_acc = []
y_val_label = np.argmax(y_val,axis=1)
for it in range(max_it):
# update q(z)
for infl in new_order.tolist():
index_infl = np.where(new_order == infl)[0][0]
assert infl == index_infl
updated_q_z_i = theta_i[index_infl].copy()
infl_aij = annotation_matrix[annotation_matrix[:, 1] == infl].copy()
worker_answers = infl_aij[~np.all(infl_aij[:,2:] == 0, axis=1)]
worker_n_answers = infl_aij[np.all(infl_aij[:,2:] == 0, axis=1)]
T_i = worker_answers[:, 0]
for worker in T_i.astype(int):
w_answer = worker_answers[worker_answers[:, 0] == worker][:, 2:]
w_answer_i = np.where(w_answer[0] == 1)[0][0]
alpha_val = alpha[worker]
beta_val = beta[worker]
updated_q_z_i[w_answer_i] = updated_q_z_i[w_answer_i] * np.exp(digamma(alpha_val) - digamma(alpha_val + beta_val))
for no_answer_i in np.delete(LABEL_INDEX,w_answer_i):
updated_q_z_i[no_answer_i] = updated_q_z_i[no_answer_i] * np.exp(digamma(beta_val) - digamma(alpha_val + beta_val))
T_i_n = worker_n_answers[:, 0]
for worker in T_i_n.astype(int):
alpha_val = alpha[worker]
beta_val = beta[worker]
for no_answer_i in LABEL_INDEX:
updated_q_z_i[no_answer_i] = updated_q_z_i[no_answer_i] * np.exp(digamma(beta_val) - digamma(alpha_val + beta_val))
# normalize
new_q_z_i = updated_q_z_i * 1.0 / (updated_q_z_i.sum())
q_z_i[index_infl] = new_q_z_i.copy()
q_z_i = np.concatenate((y_train, q_z_i[y_train.shape[0]:]))
# update q(r)
new_alpha = np.zeros((n_workers, 1))
new_beta = np.zeros((n_workers, 1))
for worker in range(0, n_workers):
new_alpha[worker] = alpha[worker]
new_beta[worker] = beta[worker]
for worker in range(0, n_workers):
worker_aij = annotation_matrix[annotation_matrix[:, 0] == worker].copy()
T_j = worker_aij[~np.all(worker_aij[:,2:] == 0, axis=1)]
T_j_n = worker_aij[np.all(worker_aij[:,2:] == 0, axis=1)]
for infl in T_j[:, 1].astype(int):
index_infl = np.where(new_order == infl)[0][0]
assert infl == index_infl
worker_answer = T_j[T_j[:, 1] == infl][:, 2:]
worker_answer_i = np.where(worker_answer[0] == 1)[0][0]
new_alpha[worker] += q_z_i[index_infl][worker_answer_i]
new_beta[worker] += 1 - q_z_i[index_infl][worker_answer_i]
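            # Items the worker saw but did not answer contribute a fixed pseudo-count:
            # new_alpha_value to alpha and (1 - new_alpha_value) to beta.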
for infl in T_j_n[:, 1].astype(int):
new_alpha[worker] += new_alpha_value
new_beta[worker] += 1 - new_alpha_value
for worker in range(0, n_workers):
alpha[worker] = new_alpha[worker]
beta[worker] = new_beta[worker]
q_z_i_change = LA.norm(old_q_z_i - q_z_i)
# da = LA.norm(old_alpha - alpha)
# db = LA.norm(old_beta - beta)
old_q_z_i = q_z_i.copy()
old_alpha = alpha.copy()
old_beta = beta.copy()
q_z_i_val_label = np.argmax(q_z_i[start_val:end_val],axis=1)
q_z_i_acc = accuracy_score(y_val_label,q_z_i_val_label)
diff.append(q_z_i_change)
train_acc.append(q_z_i_acc)
print(it, q_z_i_change)
if q_z_i_change < 0.1:
break
return q_z_i,alpha,beta
def m_step(nn_em,q_z_i, classifier, social_features, total_epochs, steps, y_test, y_val,X_val,start_val,alpha, beta):
theta_i, classifier, weights = nn_em.train_m_step_early_stopp(classifier, social_features,
q_z_i,
steps, total_epochs, y_test, y_val,X_val,start_val)
return theta_i,classifier
def var_em(nn_em_in, n_infls_label,aij_s,new_order, n_workers, social_features_labeled, true_labels, supervision_rate, \
column_names, n_neurons, m_feats, weights_before_em,weights_after_em,iterr,total_epochs,evaluation_file,theta_file,steps,new_alpha_value,multiple_input,tweet2vec_dim):
n_infls = n_infls_label
q_z_i, A, B = init_probabilities(n_infls)
alpha, beta = init_alpha_beta(A, B, n_workers)
X_train, X_test, y_train, y_test = train_test_split(social_features_labeled, true_labels,
test_size=(1 - supervision_rate), shuffle=False)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, shuffle=False)
social_features = social_features_labeled
start_val = X_train.shape[0]
end_val = X_train.shape[0] + X_val.shape[0]
n_stat_feats = m_feats - tweet2vec_dim
if multiple_input:
n_neurons = int((NUMBER_OF_LABELS + n_stat_feats)/2)
classifier = nn_em_in.create_multiple_input_model_mlp(n_neurons,(n_stat_feats,),(tweet2vec_dim,),NUMBER_OF_LABELS)
# classifier = nn_em_in.create_multiple_input_model(n_neurons,(n_stat_feats,),(tweet2vec_dim,1),NUMBER_OF_LABELS)
X_train = [X_train[:,:n_stat_feats],X_train[:,n_stat_feats:]]
X_val = [X_val[:,:n_stat_feats],X_val[:,n_stat_feats:]]
X_test = [X_test[:,:n_stat_feats],X_test[:,n_stat_feats:]]
social_features = [social_features[:,:n_stat_feats],social_features[:,n_stat_feats:]]
# X_train = [X_train[:,:n_stat_feats],X_train[:,n_stat_feats:].reshape(X_train[:,n_stat_feats:].shape[0], X_train[:,n_stat_feats:].shape[1], 1)]
# X_val = [X_val[:,:n_stat_feats],X_val[:,n_stat_feats:].reshape(X_val[:,n_stat_feats:].shape[0], X_val[:,n_stat_feats:].shape[1], 1)]
# X_test = [X_test[:,:n_stat_feats],X_test[:,n_stat_feats:].reshape(X_test[:,n_stat_feats:].shape[0], X_test[:,n_stat_feats:].shape[1], 1)]
# social_features = [social_features[:,:n_stat_feats],social_features[:,n_stat_feats:].reshape(social_features[:,n_stat_feats:].shape[0], social_features[:,n_stat_feats:].shape[1], 1)]
else:
n_neurons = int((NUMBER_OF_LABELS + m_feats)/2)
classifier = nn_em_in.define_multiclass_nn(n_neurons,m_feats,NUMBER_OF_LABELS)
print(classifier.summary())
steps_it0 = 0
epsilon = 1e-4
theta_i = q_z_i.copy()
old_theta_i = np.zeros((n_infls, NUMBER_OF_LABELS))
y_val_label = np.argmax(y_val,axis=1)
y_test_label = np.argmax(y_test,axis=1)
y_train_label = np.argmax(y_train,axis=1)
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=10,
verbose=0, mode='auto', restore_best_weights=True)
classifier.fit(X_train, y_train, validation_data=(X_val,y_val), callbacks=[monitor], verbose=2, epochs=100, batch_size=4)
theta_i_val = classifier.predict(X_val)
theta_i_test = classifier.predict(X_test)
theta_i_val_label = np.argmax(theta_i_val,axis=1)
theta_i_test_label = np.argmax(theta_i_test,axis=1)
weights = classifier.get_weights()
# pd.DataFrame(np.concatenate((column_names[1:], weights[0]), axis=1)).to_csv(weights_before_em, encoding="utf-8")
auc_val = roc_auc_score(y_val, theta_i_val,multi_class="ovo",average="macro")
auc_test = roc_auc_score(y_test, theta_i_test,multi_class="ovo",average="macro")
print('Classification Report for validation set:\n', classification_report(y_val_label, theta_i_val_label))
print('auc_val:', auc_val)
print('Classification Report for test set:\n', classification_report(y_test_label, theta_i_test_label))
print('auc_test:', auc_test)
theta_i = np.concatenate((y_train, theta_i_val, theta_i_test))
theta_quality = np.concatenate((true_labels, theta_i), axis=1)
pd.DataFrame(theta_quality).to_csv(theta_file, index=False)
accuracy_theta_i_test = []
accuracy_theta_i_val = []
accuracy_q_z_i_test = []
accuracy_q_z_i_val = []
auc_theta_i_test = []
em_step = 0
while em_step < iterr:
# variational E step
q_z_i, alpha, beta = e_step(y_train, n_workers, q_z_i, aij_s, alpha,
beta, theta_i, true_labels,new_order,y_val,start_val,end_val,new_alpha_value)
# variational M step
theta_i, classifier = m_step(nn_em_in, q_z_i, classifier, social_features, total_epochs, steps, y_test, y_val, X_val,
start_val, alpha, beta)
em_step += 1
q_z_i_val_label = np.argmax(q_z_i[start_val:end_val],axis=1)
q_z_i_test_label = np.argmax(q_z_i[end_val:],axis=1)
auc_val = roc_auc_score(y_val, q_z_i[start_val:end_val],multi_class="ovo",average="macro")
auc_test = roc_auc_score(y_test, q_z_i[end_val:],multi_class="ovo",average="macro")
theta_i_val_label = np.argmax(theta_i[start_val:end_val],axis=1)
theta_i_test_label = np.argmax(theta_i[end_val:],axis=1)
auc_val_theta = roc_auc_score(y_val, theta_i[start_val:end_val],multi_class="ovo",average="macro")
auc_test_theta = roc_auc_score(y_test, theta_i[end_val:],multi_class="ovo",average="macro")
accuracy_theta_i_test.append(accuracy_score(y_test_label, theta_i_test_label))
accuracy_theta_i_val.append(accuracy_score(y_val_label, theta_i_val_label))
accuracy_q_z_i_test.append(accuracy_score(y_test_label, q_z_i_test_label))
accuracy_q_z_i_val.append(accuracy_score(y_val_label, q_z_i_val_label))
auc_theta_i_test.append(auc_test_theta)
print('em_step', em_step)
print('Classification Report for validation set:\n', classification_report(y_val_label, q_z_i_val_label))
print('auc_val:', auc_val)
print('Classification Report for test set:\n', classification_report(y_test_label, q_z_i_test_label))
print('auc_test:', auc_test)
print('Classification Report for validation set (theta):\n', classification_report(y_val_label, theta_i_val_label))
print('auc_val_theta:', auc_val_theta)
print('Classification Report for test set (theta):\n', classification_report(y_test_label, theta_i_test_label))
print('auc_test_theta:', auc_test_theta)
if __name__ == '__main__':
plt.plot(accuracy_theta_i_test, marker='o', label='accuracy_theta_i_test')
plt.plot(accuracy_theta_i_val, marker='o', label='accuracy_theta_i_val')
plt.plot(accuracy_q_z_i_test, marker='o', label='accuracy_q_z_i_test')
plt.plot(accuracy_q_z_i_val, marker='o', label='accuracy_q_z_i_val')
plt.legend()
plt.show()
weights = classifier.get_weights()
# pd.DataFrame(np.concatenate((column_names[1:], weights[0]), axis=1)).to_csv(weights_after_em, encoding="utf-8")
report = pd.DataFrame([accuracy_theta_i_test[-1],auc_theta_i_test[-1],accuracy_theta_i_val[-1],auc_val_theta,accuracy_q_z_i_test[-1],accuracy_q_z_i_val[-1]],index=['accuracy_theta_i_test','auc_theta_i_test','accuracy_theta_i_val','auc_theta_i_val','accuracy_q_z_i_test','accuracy_q_z_i_val']).transpose()
# report = report.describe()
return q_z_i, alpha, beta, theta_i, classifier, report
def run(influencer_file_labeled, annotation_file, labels_file, tweet2vec_file, tweet2vec_dim, theta_file,
evaluation_file, weights_before_em, weights_after_em, total_epochs, n_neurons, steps, supervision_rate,
iterr, sampling_rate, worker_reliability_file, influencer_quality_file, random_sampling,new_alpha_value,multiple_input):
tweet2vec = pd.read_csv(tweet2vec_file)
influencer_labeled = pd.read_csv(influencer_file_labeled, sep=",")
influencer_labeled = pd.merge(influencer_labeled, tweet2vec[['screen_name','tweet2vec']], how='inner', on=['screen_name'])
influencer_labeled = influencer_labeled[influencer_labeled['tweet2vec'].notna()]
labeled_embeddings = []
for index, row in influencer_labeled.iterrows():
labeled_embeddings.append(np.fromstring(row['tweet2vec'][1:-1], dtype=float, sep=' '))
labeled_embeddings = np.array(labeled_embeddings)
influencer_labeled = influencer_labeled.drop(['screen_name','tweet2vec'], axis=1)
column_names = np.array(influencer_labeled.columns).reshape((influencer_labeled.shape[1], 1))
for i in range(0,tweet2vec_dim):
column_names = np.append(column_names, np.array([['vector' + str(i)]]), axis=0)
print(column_names.shape)
annotation_matrix = | pd.read_csv(annotation_file, sep=",",header=None) | pandas.read_csv |
# The xrayvis bokeh app
import os
import numpy as np
import pandas as pd
import requests
import yaml
from tempfile import TemporaryDirectory, NamedTemporaryFile
from base64 import b64decode
import parselmouth
from bokeh_phon.utils import remote_jupyter_proxy_url_callback, set_default_jupyter_url
from bokeh_phon.models.audio_button import AudioButton
from phonlab.array import nonzero_groups
from bokeh.core.query import find
from bokeh.plotting import figure
from bokeh.colors import RGB
from bokeh.models import BoxAnnotation, BoxSelectTool, BoxZoomTool, Button, Circle, \
ColumnDataSource, CrosshairTool, Div, FileInput, HoverTool, LinearColorMapper, \
LogColorMapper, MultiLine, MultiSelect, RadioButtonGroup, Range1d, RangeSlider, \
Select, Slider, Span, Spinner, PanTool, ResetTool, TapTool, \
WheelZoomTool, ZoomInTool, ZoomOutTool
from bokeh.models.widgets import DataTable, NumberFormatter, Panel, Tabs, TableColumn
from bokeh.io import show, output_notebook, push_notebook
from bokeh.layouts import column, gridplot, row
from bokeh.events import MouseMove, SelectionGeometry, Tap
from bokeh.transform import linear_cmap
from bokeh.palettes import Greens, Greys, Greys256, Reds
r_Greens9 = list(reversed(Greens[9]))
r_Greys9 = list(reversed(Greys[9]))
r_Greys256 = list(reversed(Greys256))
r_Reds9 = list(reversed(Reds[9]))
# The remote_jupyter_proxy_url function is required when running on a BinderHub instance.
# Use the set_default_jupyter_url function to set the hostname of your instance after it has
# started. The current value is the most frequent result when launching from mybinder.org.
# Change it to another value if the Binder host you are running on is not this most common one:
# set_default_jupyter_url('https://datahub.berkeley.edu/')
# Or, if you are running locally (on localhost), ignore the previous line and set
# `local_notebook` to True:
# local_notebook = True
output_notebook()
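# --- Illustrative usage (added sketch): how the app is typically launched ---
# A Bokeh app defined as a function is shown in the notebook with `show()`; on a
# Binder/JupyterHub host the proxy callback imported above is passed as
# `notebook_url`. The entry-point name `xrayvis_app` is an assumption for this
# sketch, not necessarily the name defined later in this file.
#
#   show(xrayvis_app, notebook_url=remote_jupyter_proxy_url_callback)  # remote (Binder) host
#   show(xrayvis_app)                                                  # plain local notebook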
# bad values in .txy files are 1000000 (scaled to 1000)
# TODO:
# when bokeh can handle plots with NaN, use that to filter instead of badval
badval = 1000
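# --- Illustrative helper (added sketch, not wired into the app below) ---
# One way to drop the sentinel before plotting is to replace it with NaN so that
# downstream code can skip those samples; this only sketches the filtering the
# TODO above refers to.
def mask_badval(arr, bad=badval):
    # return a float copy with sentinel entries replaced by NaN
    out = np.asarray(arr, dtype=float).copy()
    out[out >= bad] = np.nan
    return out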
params = {
'low_thresh_color': 'white',
'low_thresh_power': 3.5,
'window_size': 5.0,
'spslice_lastx': 0.0,
'downsample_rate': 20000
}
snd = None
wavname = None
dfs = {}
# Info for loading/caching audio files
tempdirobj = TemporaryDirectory()
tempdir = tempdirobj.name
resource_url = 'https://linguistics.berkeley.edu/phonapps/resource/'
manifest_name = 'manifest.yaml'
manifest_key = 'resources'
timesource = ColumnDataSource(
{
'T1x': [], 'T1y': [], 'T2x': [], 'T2y': [],
'T3x': [], 'T3y': [], 'T4x': [], 'T4y': [],
'ULx': [], 'ULy': [], 'LLx': [], 'LLy': [],
'MIx': [], 'MIy': [], 'MMx': [], 'MMy': [],
'color': [], 'sec': []
}
)
allwddf = pd.read_feather('all_words.feather')
allphdf = pd.read_feather('all_phones.feather')
fileopt_demo_dtypes = {
'speaker': 'category', 'wavname': 'category', 'uttid': 'category', 'rep': 'category',
'bytes': np.int16,
'subject': 'category', 'sex': 'category', 'dialect_base_state': 'category', 'dialect_base_city': 'category'
}
fileoptsdf = pd.read_csv('file_opts.csv', dtype=fileopt_demo_dtypes)
demo = pd.merge(
pd.read_csv('speaker_demographics1.csv', dtype=fileopt_demo_dtypes),
| pd.read_csv('speaker_demographics2.csv', dtype=fileopt_demo_dtypes) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 27 14:23:22 2019
@author: avanetten
"""
import os
import json
import argparse
import pandas as pd
import networkx as nx
# import shapely.wkt
from jsons.config import Config
###############################################################################
def pkl_dir_to_wkt(pkl_dir, output_csv_path='',
weight_keys=['length', 'travel_time_s'],
verbose=False):
"""
Create submission wkt from directory full of graph pickles
"""
wkt_list = []
pkl_list = sorted([z for z in os.listdir(pkl_dir) if z.endswith('.gpickle')])
for i, pkl_name in enumerate(pkl_list):
G = nx.read_gpickle(os.path.join(pkl_dir, pkl_name))
# ensure an undirected graph
print(i, "/", len(pkl_list), "num G.nodes:", len(G.nodes()))
name_root = pkl_name.replace('PS-RGB_', '').replace('PS-MS_', '').split('.')[0]
# AOI_root = 'AOI' + pkl_name.split('AOI')[-1]
# name_root = AOI_root.split('.')[0].replace('PS-RGB_', '')
print("name_root:", name_root)
# if empty, still add to submission
if len(G.nodes()) == 0:
wkt_item_root = [name_root, 'LINESTRING EMPTY']
if len(weight_keys) > 0:
weights = [0 for w in weight_keys]
wkt_list.append(wkt_item_root + weights)
else:
wkt_list.append(wkt_item_root)
# extract geometry pix wkt, save to list
seen_edges = set([])
for i, (u, v, attr_dict) in enumerate(G.edges(data=True)):
# make sure we haven't already seen this edge
if (u, v) in seen_edges or (v, u) in seen_edges:
print(u, v, "already catalogued!")
continue
else:
seen_edges.add((u, v))
seen_edges.add((v, u))
geom_pix_wkt = attr_dict['geometry_pix'].wkt
# check edge length
if attr_dict['length'] > 5000:
print("Edge too long!, u,v,data:", u,v,attr_dict)
return
if verbose:
print(i, "/", len(G.edges()), "u, v:", u, v)
print(" attr_dict:", attr_dict)
print(" geom_pix_wkt:", geom_pix_wkt)
wkt_item_root = [name_root, geom_pix_wkt]
if len(weight_keys) > 0:
weights = [attr_dict[w] for w in weight_keys]
if verbose:
print(" weights:", weights)
wkt_list.append(wkt_item_root + weights)
else:
wkt_list.append(wkt_item_root)
if verbose:
print("wkt_list:", wkt_list)
# create dataframe
if len(weight_keys) > 0:
cols = ['ImageId', 'WKT_Pix'] + weight_keys
else:
cols = ['ImageId', 'WKT_Pix']
# use 'length_m' and 'travel_time_s' instead?
cols_new = []
for z in cols:
if z == 'length':
cols_new.append('length_m')
elif z == 'travel_time':
cols_new.append('travel_time_s')
else:
cols_new.append(z)
cols = cols_new
print("cols:", cols)
df = | pd.DataFrame(wkt_list, columns=cols) | pandas.DataFrame |
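# --- Illustrative usage (added sketch): the paths below are placeholders, not
# paths taken from this repository.
# wkt_df = pkl_dir_to_wkt('graphs_pkl/', output_csv_path='solution.csv',
#                         weight_keys=['length', 'travel_time_s'], verbose=False)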