prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
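Each row below pairs a code prompt with its completion and the API that completion exercises. A minimal loading sketch follows; the file name and JSON-lines format are assumptions for illustration only, not part of this dump:

# Hypothetical loading sketch -- adjust the path/format to wherever these rows are stored.
import pandas as pd

rows = pd.read_json("train.jsonl", lines=True)   # expected columns: prompt, completion, api
print(rows["api"].value_counts().head())         # most frequently completed APIs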
import os
import random
from typing import Iterable, Dict, Any
import pandas
from IPython.display import display
from duorat.datasets.spider import SpiderItem, SpiderDataset
from duorat.asdl.lang.spider.spider import SpiderGrammar
from duorat.utils.evaluation import load_from_lines
def show_question(ex):
print(ex.question)
print(ex.query)
def show_question_set(question_set, k=10):
rng = random.Random(1)
if k > len(question_set):
k = len(question_set)
for idx in rng.sample(list(range(len(question_set))), k=k):
print(idx)
show_question(question_set[idx])
print()
def load_outputs(
experiment_path: str, output_file: str, trial_ids: Iterable[int] = None
) -> Dict[int, Any]:
if trial_ids is None:
trial_ids = [int(trial_id) for trial_id in os.listdir(experiment_path)]
all_outputs = {}
for trial_id in trial_ids:
path = f"{experiment_path}/{trial_id}/{output_file}"
with open(path) as src:
all_outputs[trial_id] = list(load_from_lines(list(src)))
return all_outputs
def evaluate_outputs(
dataset: SpiderDataset, all_outputs: Dict[int, Any]
) -> pandas.DataFrame:
columns = ("trial_id", "qid", "exact", "group_acc")
metrics = SpiderDataset.Metrics(dataset)
data = []
for trial_id, outputs in all_outputs.items():
assert len(outputs) == len(dataset)
for qid, (example, predicted) in enumerate(zip(dataset, outputs)):
r = metrics.evaluator.evaluate_one(
example.spider_schema.db_id, example.orig["query"], predicted[0]
)
data.append(
(trial_id, qid, int(r["exact"]), int(r["partial"]["group"]["acc"]))
)
return pandas.DataFrame(data, columns=columns)
def compute_dataset_metadata(
examples: Iterable[SpiderItem], grammar: SpiderGrammar
) -> pandas.DataFrame:
all_metadata = []
for ex in examples:
parse = grammar.parse_sql(ex.spider_sql)
metadata = {
"group_by": "group_by" in parse,
"order_by": "order_by" in parse,
"per": "per" in ex.question.split(),
}
metadata["num_tables"] = len(parse["from"]["table_units"])
# the number of where clauses is easier to infer from the original SPIDER SQL format
metadata["num_where"] = (len(ex.spider_sql["where"]) + 1) // 2
metadata["db_id"] = ex.spider_schema.db_id
def recurse(path, node):
if node["_type"] == "sql":
if path:
metadata["nested"] = True
if "select" in path:
metadata["sql_in_select"] = True
if "where" in path:
metadata["sql_in_where"] = True
elif "from" in path:
metadata["sql_in_from"] = True
elif "having" in path:
metadata["sql_in_having"] = True
elif "except" in path:
metadata["except"] = True
elif "intersect" in path:
metadata["intersect"] = True
elif "union" in path:
metadata["union"] = True
elif path:
metadata["other_nested"] = True
print(path)
if node["_type"] == "Count" and "select" in path:
metadata["count"] = True
if node["_type"] == "Count" and "order_by" in path:
metadata["count_in_order_by"] = True
for key, child in node.items():
if isinstance(child, dict):
recurse(path + [key], child)
elif isinstance(child, list):
for element in child:
recurse(path + [key], element)
recurse([], parse)
all_metadata.append(metadata)
return | pandas.DataFrame(all_metadata) | pandas.DataFrame |
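# --- Usage sketch for the helpers above (the experiment layout and file name are assumptions) ---
def summarize_exact_match(dataset: SpiderDataset, experiment_path: str) -> pandas.DataFrame:
    """Illustrative only: load every trial's decoded outputs from a hypothetical
    <experiment_path>/<trial_id>/val_decoded.txt layout and report per-trial
    exact-match accuracy using load_outputs/evaluate_outputs defined above."""
    all_outputs = load_outputs(experiment_path, output_file="val_decoded.txt")
    report = evaluate_outputs(dataset, all_outputs)
    return report.groupby("trial_id").exact.mean().to_frame("exact_match")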
# coding: utf-8
# In[11]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[12]:
#need to add city after finishing
df_city = pd.read_csv("cityresults.dat", header=None)
df_bayview = pd.read_csv("bayviewresults.dat", header=None)
df_ingleside = pd.read_csv("Ingleside_results.dat", header=None)
df_park = pd.read_csv("Park_results.dat", header=None)
df_taraval = pd.read_csv("Taraval_results.dat", header=None)
df_central = | pd.read_csv("Central_results.dat", header=None) | pandas.read_csv |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
| assert_frame_equal(df, expected, check_names=True) | pandas.testing.assert_frame_equal |
# Packages
# Basic packages
import numpy as np
from scipy import integrate, stats, spatial
from scipy.special import expit, binom
import pandas as pd
import xlrd # help read excel files directly from source into pandas
import copy
import warnings
# Building parameter/computation graph
import inspect
from collections import OrderedDict
# OS/filesystem tools
import time
from datetime import datetime
import random
import string
import os
import shutil
import sys
import cloudpickle
# Distributed computing tools
import dask
import distributed
from dask.distributed import Client
from dask.distributed import as_completed
import itertools
# State Dimensions
# Health states (S, E and D are fixed to 1 dimension)
nI_symp = 2 # number of symptomatic infected states
nI = 2+nI_symp # number of total infected states (disease stages), the +2 are Exposed and I_nonsymptomatic
nR = 2 # number of recovery states (antibody development post-disease, IgM and IgG are two stages)
nHS = 2+nI+nR # number of total health states, the +2, S and D, are susceptible and dead
# Age groups (risk groups)
nAge = 9 # In accordance with the Imperial College report #13 age groups (0-9, 10-19, ... 70-79, 80+)
# Isolation states
nIso = 4 # None/distancing, Case isolation, Hospitalised, Hospital staff
# Testing states
nTest = 4 # untested/negative, Virus positive, Antibody positive, Both positive
stateTensor = np.ones((nAge, nHS, nIso, nTest))
# Population (data from Imperial #13 ages.csv/UK)
agePopulationTotal = 1000.*np.array([8044.056,7642.473,8558.707,9295.024,8604.251,9173.465,7286.777,5830.635,3450.616])
#agePopulationTotal = 1000.*pd.read_csv("https://raw.githubusercontent.com/ImperialCollegeLondon/covid19model/master/data/ages.csv").iloc[3].values[2:]
# Currently: let's work with england population only instead of full UK, as NHS England + CHESS data is much clearer than other regions
agePopulationTotal *= 55.98/66.27 # (google england/uk population 2018, assuming age dist is similar)
agePopulationRatio = agePopulationTotal/np.sum(agePopulationTotal)
# Helper function to adjust average rates to age-aware rates
def adjustRatesByAge_KeepAverageRate(rate, ageRelativeAdjustment, agePopulationRatio=agePopulationRatio, maxOutRate=10):
"""This is a helper function and wont be picked up as a model parameter!"""
if rate == 0:
return np.zeros_like(ageRelativeAdjustment)
if rate >= maxOutRate:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Input rate {} > maxOutRate {}, returning input rates".format(rate, maxOutRate))
return rate*np.ones_like(ageRelativeAdjustment)
out = np.zeros_like(ageRelativeAdjustment)
out[0] = maxOutRate+1 # just to start the while loop below
while np.sum(out>=maxOutRate)>0:
corrFactor = np.sum(agePopulationRatio/(1+ageRelativeAdjustment))
out = rate * (1+ageRelativeAdjustment) * corrFactor
if np.sum(out>=maxOutRate)>0:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Adjusted rate larger than {} encountered, reducing ageAdjustment variance by 10%".format(maxOutRate))
tmp_mean = np.mean(ageRelativeAdjustment)
ageRelativeAdjustment = tmp_mean + np.sqrt(0.9)*(ageRelativeAdjustment-tmp_mean)
return out
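# Illustrative sanity check (toy rate of 0.005/day, not a calibrated value): the construction
# above preserves the population-weighted average waiting time (inverse rate), i.e.
# sum_a agePopulationRatio[a] / out[a] == 1 / rate, provided maxOutRate is never hit.
_example_rates = adjustRatesByAge_KeepAverageRate(
    rate=0.005, ageRelativeAdjustment=relativeDeathRisk_given_COVID_by_age)
assert np.isclose(np.sum(agePopulationRatio / _example_rates), 1. / 0.005)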
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
relativeDeathRisk_given_COVID_by_age = np.array([-0.99742186, -0.99728639, -0.98158438, -0.9830432 , -0.82983414,
-0.84039294, 0.10768979, 0.38432409, 5.13754904])
#ageRelativeDiseaseSeverity = np.array([-0.8, -0.6, -0.3, -0.3, -0.1, 0.1, 0.35, 0.4, 0.5]) # FIXED (above) - this is a guess, find data and fix
#ageRelativeRecoverySpeed = np.array([0.2]*5+[-0.1, -0.2, -0.3, -0.5]) # TODO - this is a guess, find data and fix
ageRelativeRecoverySpeed = np.array([0.]*9) # For now we make it same for everyone, makes calculations easier
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
caseFatalityRatioHospital_given_COVID_by_age = np.array([0.00856164, 0.03768844, 0.02321319, 0.04282494, 0.07512237,
0.12550367, 0.167096 , 0.37953452, 0.45757006])
def trFunc_diseaseProgression(
# Basic parameters to adhere to
nonsymptomatic_ratio = 0.86,
# number of days between measurable events
infect_to_symptoms = 5.,
#symptom_to_death = 16.,
symptom_to_recovery = 10., # 20.5, # unrealistically long for old people
symptom_to_hospitalisation = 5.76,
hospitalisation_to_recovery = 14.51,
IgG_formation = 15.,
# Age related parameters
# for now we'll assume that all hospitalised cases are known (overall 23% of hospitalised COVID patients die. 9% overall case fatality ratio)
caseFatalityRatioHospital_given_COVID_by_age = caseFatalityRatioHospital_given_COVID_by_age,
ageRelativeRecoverySpeed = ageRelativeRecoverySpeed,
# Unknown rates to estimate
nonsymp_to_recovery = 15.,
inverse_IS1_IS2 = 4.,
**kwargs
):
# Now we have all the information to build the age-aware multistage SIR model transition matrix
# The full transition tensor is a sparse map from the Age x HealthState x isolation state to HealthState,
# and thus is a 4th order tensor itself, representing a linear mapping
# from "number of people aged A in health state B and isolation state C to health state D.
trTensor_diseaseProgression = np.zeros((nAge, nHS, nIso, nHS))
# Use basic parameters to regularise inputs
E_IS1 = 1./infect_to_symptoms
# The nonsymptomatic fraction is assumed to be 86% (nonsymptomatic_ratio) -> E->IN / E->IS1 = 0.86/0.14
E_IN = nonsymptomatic_ratio/(1.-nonsymptomatic_ratio) * E_IS1
# Nonsymptomatic recovery
IN_R1 = 1./nonsymp_to_recovery
IS1_IS2 = 1./inverse_IS1_IS2
IS2_R1 = 1./(symptom_to_recovery-inverse_IS1_IS2)
R1_R2 = 1./IgG_formation
# Disease progression matrix # TODO - calibrate (together with transmissionInfectionStage)
# rows: from-state, cols: to-state (non-symmetric!)
# - this represent excess deaths only, doesn't contain baseline deaths!
# Calculate all non-serious cases that do not end up in hospitals.
# Note that we only have reliable death data from hospitals (NHS England), so we do not model people dying outside hospitals
diseaseProgBaseline = np.array([
# to: E, IN, IS1, IS2, R1, R2, D
[ 0 , E_IN, E_IS1, 0, 0, 0, 0 ], # from E
[ 0, 0, 0, 0, IN_R1, 0, 0 ], # from IN
[ 0 , 0, 0, IS1_IS2, 0, 0, 0 ], # from IS1
[ 0 , 0, 0, 0, IS2_R1, 0, 0 ], # from IS2
[ 0 , 0, 0, 0, 0, R1_R2, 0 ], # from R1
[ 0 , 0, 0, 0, 0, 0, 0 ], # from R2
[ 0 , 0, 0, 0, 0, 0, 0 ] # from D
])
ageAdjusted_diseaseProgBaseline = copy.deepcopy(np.repeat(diseaseProgBaseline[np.newaxis],nAge,axis=0))
# Modify all death and R1 rates:
for ii in range(ageAdjusted_diseaseProgBaseline.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_diseaseProgBaseline[:,ii,-1] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-1],
ageRelativeAdjustment=relativeDeathRisk_given_COVID_by_age
)
# Adjust recovery rate by age dependent recovery speed
ageAdjusted_diseaseProgBaseline[:,ii,-3] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-3],
ageRelativeAdjustment=ageRelativeRecoverySpeed,
agePopulationRatio=agePopulationRatio
)
ageAdjusted_diseaseProgBaseline_Hospital = copy.deepcopy(ageAdjusted_diseaseProgBaseline)
# Calculate hospitalisation based rates, for which we do have data. Hospitalisation can end up with deaths
# Make sure that the ratio of recoveries in hospital honour the case fatality ratio appropriately
# IS2 -> death
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-1] = (
# IS2 -> recovery
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-3] * (
# multiply by cfr / (1-cfr) to get correct rate towards death
caseFatalityRatioHospital_given_COVID_by_age/(
1 - caseFatalityRatioHospital_given_COVID_by_age)
)
)
# TODO - time to death might be incorrect overall without an extra delay state, especially for young people
# Non-hospitalised disease progression
for i1 in [0,1,3]:
trTensor_diseaseProgression[:,1:,i1,1:] = ageAdjusted_diseaseProgBaseline
# hospitalised disease progression
trTensor_diseaseProgression[:,1:,2,1:] = ageAdjusted_diseaseProgBaseline_Hospital
return trTensor_diseaseProgression
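# Illustrative sketch of how this 4th-order tensor acts on the state (an assumption about the
# contraction used later in dydt_Complete, which is not shown in full here; this only makes the
# index convention concrete): people counted in stateTensor[age, fromHS, iso, test] flow to
# health state toHS at rate trTensor[age, fromHS, iso, toHS].
_tr_example = trFunc_diseaseProgression()
_inflow = np.einsum('ahit,ahik->akit', stateTensor, _tr_example)          # arrivals into each toHS
_outflow = stateTensor * _tr_example.sum(axis=-1)[:, :, :, np.newaxis]    # departures from each fromHS
_d_state_diseaseProgression = _inflow - _outflow                          # net d(state)/dt contribution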
# Larger data driver approaches, with age distribution, see data_cleaning_R.ipynb for details
ageHospitalisationRateBaseline = pd.read_csv('../data/clean_hosp-epis-stat-admi-summ-rep-2015-16-rep_table_6.csv', sep=',').iloc[:,-1].values
ageHospitalisationRecoveryRateBaseline = 1./pd.read_csv('../data/clean_10641_LoS_age_provider_suppressed.csv', sep=',').iloc[:,-1].values
# Calculate initial hospitalisation (occupancy), that will be used to initialise the model
initBaselineHospitalOccupancyEquilibriumAgeRatio = ageHospitalisationRateBaseline/(ageHospitalisationRateBaseline+ageHospitalisationRecoveryRateBaseline)
# Take into account the NHS work-force in hospitals that for our purposes count as "hospitalised S" population,
# also unaffected by quarantine measures
ageNhsClinicalStaffPopulationRatio = pd.read_csv('../data/clean_nhsclinicalstaff.csv', sep=',').iloc[:,-1].values
# Extra rate of hospitalisation due to COVID-19 infection stages
# TODO - find / estimate data on this (unfortunately true rates are hard to get due to many unknown cases)
# Symptom to hospitalisation is 5.76 days on average (Imperial #8)
infToHospitalExtra = np.array([1e-4, 1e-3, 2e-2, 1e-2])
# We do know at least how age affects these risks:
# For calculations see data_cleaning_py.ipynb, calculations from CHESS dataset as per 05 Apr
relativeAdmissionRisk_given_COVID_by_age = np.array([-0.94886625, -0.96332087, -0.86528671, -0.79828999, -0.61535305,
-0.35214767, 0.12567034, 0.85809052, 3.55950368])
riskOfAEAttandance_by_age = np.array([0.41261361, 0.31560648, 0.3843979 , 0.30475704, 0.26659415,
0.25203475, 0.24970244, 0.31549102, 0.65181376])
# Build the transition tensor from any non-hospitalised state to a hospitalised state
# (being in home quarantine is assumed to affect only the infection probability [below], not the hospitalisation probability)
# caseIsolationHospitalisationRateAdjustment = 1.
# This function takes as input the number of people in given age and health state, and in any non-hospitalised state
# and returns the number of people staying in the same age and health state,
# but now hospitalised (the rest of people remain in whatever state they were in)
def trFunc_HospitalAdmission(
ageHospitalisationRateBaseline = ageHospitalisationRateBaseline,
infToHospitalExtra = infToHospitalExtra,
ageRelativeExtraAdmissionRiskToCovid = relativeAdmissionRisk_given_COVID_by_age * riskOfAEAttandance_by_age,
**kwargs
):
# This tensor will pointwise multiply an nAge x nHS slice of the stateTensor
trTensor_HospitalAdmission = np.zeros((nAge, nHS))
ageAdjusted_infToHospitalExtra = copy.deepcopy(np.repeat(infToHospitalExtra[np.newaxis],nAge,axis=0))
for ii in range(ageAdjusted_infToHospitalExtra.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_infToHospitalExtra[:,ii] = adjustRatesByAge_KeepAverageRate(
infToHospitalExtra[ii],
ageRelativeAdjustment=ageRelativeExtraAdmissionRiskToCovid
)
# Add baseline hospitalisation to all non-dead states
trTensor_HospitalAdmission[:,:-1] += np.expand_dims(ageHospitalisationRateBaseline,-1)
# Add COVID-caused hospitalisation to all infected states (TODO: This is summation of rates for independent processes, should be correct, but check)
trTensor_HospitalAdmission[:,1:(nI+1)] += ageAdjusted_infToHospitalExtra
return trTensor_HospitalAdmission
def trFunc_HospitalDischarge(
ageHospitalisationRecoveryRateBaseline = ageHospitalisationRecoveryRateBaseline,
dischargeDueToCovidRateMultiplier = 3.,
**kwargs
):
trTensor_HospitalDischarge = np.zeros((nAge, nHS))
# Baseline discharges apply to all non-symptomatic patients (TODO: take into account testing state!)
trTensor_HospitalDischarge[:, :3] += ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
# No discharges for COVID symptomatic people from the hospital until they recover
# TODO - check with health experts if this is correct assumption; probably also depends on testing state
trTensor_HospitalDischarge[:, 3:5] = 0.
trTensor_HospitalDischarge[:, 5:7] = dischargeDueToCovidRateMultiplier * ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
return trTensor_HospitalDischarge
ageSocialMixingBaseline = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix.csv', sep=',').iloc[:,1:].values
ageSocialMixingBaseline = (ageSocialMixingBaseline+ageSocialMixingBaseline.T)/2.
ageSocialMixingDistancing = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix_with_distancing.csv', sep=',').iloc[:,1:].values
ageSocialMixingDistancing = (ageSocialMixingDistancing+ageSocialMixingDistancing.T)/2.
ageSocialMixingIsolation = np.zeros_like(ageSocialMixingBaseline)
elevatedMixingRatioInHospital = 3.0
withinHospitalSocialMixing = elevatedMixingRatioInHospital * np.sum(np.dot(agePopulationRatio, ageSocialMixingBaseline))
transmissionInfectionStage = np.array([0.001, 0.1, 0.6, 0.5])
def trFunc_newInfections_Complete(
stateTensor,
policySocialDistancing, # True / False, no default because it's important to know which one we use at any moment!
policyImmunityPassports, # True / False, no default because it's important to know which one we use at any moment!
ageSocialMixingBaseline = ageSocialMixingBaseline,
ageSocialMixingDistancing = ageSocialMixingDistancing,
ageSocialMixingIsolation = ageSocialMixingIsolation,
withinHospitalSocialMixing = withinHospitalSocialMixing,
transmissionInfectionStage = transmissionInfectionStage,
**kwargs
):
ageIsoContractionRate = np.zeros((nAge, nIso, nTest))
# Add non-hospital infections
#--------------------------------
curNonIsolatedSocialMixing = ageSocialMixingDistancing if policySocialDistancing else ageSocialMixingBaseline
# Add baseline interactions only between non-isolated people
for k1 in [0,3]:
for k2 in [0,3]:
ageIsoContractionRate[:,k1,:] += np.expand_dims(
np.matmul(
curNonIsolatedSocialMixing,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),k2,:], transmissionInfectionStage) # all infected in non-isolation
),
axis=1
)
if policyImmunityPassports:
# If the immunity passports policy is on, everyone who tested antibody positive can roam freely
# Therefore replace the interactions between people with testingState = 2 with ageSocialMixingBaseline
# we do this by using the distributive property of matrix multiplication, and adding extra interactions
# "ageSocialMixingBaseline"-"curNonIsolatedSocialMixing" with each other (this is zero if no social distancing!)
# TODO - this is a bit hacky?, but probably correct - double check though!
for k1 in [0,3]:
for k2 in [0,3]:
ageIsoContractionRate[:,k1,2:] += np.matmul(
ageSocialMixingBaseline-curNonIsolatedSocialMixing,
np.einsum('ijk,j->ik',
stateTensor[:,1:(nI+1),k2,2:], transmissionInfectionStage) # all infected in non-isolation
)
# Add isolation interactions only between isolated and non-isolated people
# non-isolated contracting it from isolated
for k1 in [0,3]:
ageIsoContractionRate[:,k1,:] += np.expand_dims(
np.matmul(
ageSocialMixingIsolation,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),1,:], transmissionInfectionStage) # all infected in isolation
),
axis=1
)
# isolated contracting it from non-isolated
for k1 in [0,3]:
ageIsoContractionRate[:,1,:] += np.expand_dims(
np.matmul(
ageSocialMixingIsolation,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),k1,:], transmissionInfectionStage) # all infected in non-hospital, non-isolation
),
axis = 1
)
# isolated people cannot contract it from other isolated people
# Add in-hospital infections (of hospitalised patients, and staff)
#--------------------------------
# (TODO - within hospitals we probably want to take into effect the testing state;
# tested people are better isolated and there's less mixing)
ageIsoContractionRate[:,2:,:] += np.expand_dims(
withinHospitalSocialMixing *
np.einsum('ijkl,j->i',
stateTensor[:,1:(nI+1),2:,:], transmissionInfectionStage), # all infected in hospital (sick or working)
axis = (1,2))
return ageIsoContractionRate/np.sum(stateTensor) # Normalise the rate by total population
def trFunc_travelInfectionRate_ageAdjusted(
t, # Time (int, in days) within simulation
travelMaxTime = 200,
travelBaseRate = 5e-4, # How many people normally travel back to the country per day # TODO - get data
travelDecline_mean = 15.,
travelDecline_slope = 1.,
travelInfection_peak = 1e-1,
travelInfection_maxloc = 10.,
travelInfection_shape = 2.,
**kwargs
):
tmpTime = np.arange(travelMaxTime)
# nAge x T TODO get some realistic data on this
travelAgeRateByTime = travelBaseRate * np.outer(agePopulationRatio, 1-expit((tmpTime-travelDecline_mean)/travelDecline_slope))
# 1 x T TODO get some realistic data on this, maybe make it age weighted
travelContractionRateByTime = stats.gamma.pdf(tmpTime, a=travelInfection_shape, loc=0., scale=travelInfection_maxloc/(travelInfection_shape-1))
travelContractionRateByTime = travelInfection_peak*travelContractionRateByTime/np.max(travelContractionRateByTime)
if t >= travelAgeRateByTime.shape[-1]:
return np.zeros(travelAgeRateByTime.shape[0])
else:
return travelAgeRateByTime[:,int(t)] * travelContractionRateByTime[int(t)]
def inpFunc_testSpecifications(
PCR_FNR_I1_to_R2 = np.array([ 0.9, 0.4, 0.15, 0.35, 0.5, 0.8]),
PCR_FPR = 0.01,
antigen_FNR_I1_to_R2 = np.array([ 0.95, 0.6, 0.35, 0.45, 0.6, 0.9]),
antigen_FPR = 0.1,
antibody_FNR_I1_to_R2 = np.array([0.99, 0.85, 0.8, 0.65, 0.3, 0.05]),
antibody_FPR_S_to_I4 = np.array([0.05, 0.04, 0.03, 0.02, 0.01])
):
testSpecifications = pd.DataFrame(
columns=["Name"],#, "Infection stage"],#, "Sensitivity", "Specificity"],
data = (
["PCR"] * nHS +
["Antigen"] * (nHS) +
["Antibody"] * (nHS))
)
testSpecifications['OutputTestState'] = [1]*nHS + [1]*nHS + [2]*nHS # what information state does a pos test transition you to.
testSpecifications['TruePosHealthState'] = [np.arange(1,nI+1)]*nHS + [np.arange(1,nI+1)]*nHS + [np.arange(nI+1,nI+nR+1)]*nHS # which health states count as true positives for this test type.
# In some health states some people are true negatives and some are true positives! (No, makes little sense to use, just account for it in FPR? Only matters for test makers...)
# testSpecifications['AmbiguousPosHealthState'] = [np.arange(nI+1, nI+nR+1)]*nHS + [np.arange(nI+1, nI+nR+1)]*nHS + [np.arange(1, nI+1)]*nHS # what information state does a pos test transition you to.
testSpecifications['InputHealthState'] = list(np.tile(range(nHS),3))
# These numbers below are "defaults" illustrating the concept, but are modified by the inputs!!!
testSpecifications['FalseNegativeRate'] = [ # ratio of positive (infected / immune) people missed by the test
# For each health stage:
# S -> I1 (asymp) -> I2 (mild symp) -> I3 (symp, sick) -> I4 (symp, less sick) -> R1 / R2 (IgM, IgG avail) -> D
# PCR
0., 0.9, 0.4, 0.15, 0.35, 0.5, 0.8, 0.,
# Antigen
0., 0.95, 0.6, 0.35, 0.45, 0.6, 0.9, 0.,
# Antibody
0., 0.99, 0.85, 0.8, 0.65, 0.3, 0.05, 0.
]
testSpecifications.loc[1:6,'FalseNegativeRate'] = PCR_FNR_I1_to_R2
testSpecifications.loc[9:14,'FalseNegativeRate'] = antigen_FNR_I1_to_R2
testSpecifications.loc[17:22,'FalseNegativeRate'] = antibody_FNR_I1_to_R2
testSpecifications['FalsePositiveRate'] = [ # ratio of negative (non-infected or not immune) people deemed positive by the test
# PCR
0.01, 0.,0.,0.,0., 0.01, 0.01, 0.,
# Antigen
0.1, 0.,0.,0.,0., 0.1, 0.1, 0.,
# Antibody
0.05, 0.04, 0.03, 0.02, 0.01, 0., 0., 0.
]
testSpecifications.loc[0,'FalsePositiveRate'] = PCR_FPR
testSpecifications.loc[5:6,'FalsePositiveRate'] = PCR_FPR
testSpecifications.loc[8,'FalsePositiveRate'] = antigen_FPR
testSpecifications.loc[13:14,'FalsePositiveRate'] = antigen_FPR
testSpecifications.loc[16:20,'FalsePositiveRate'] = antibody_FPR_S_to_I4
name = testSpecifications['Name']
truePosHealthState = testSpecifications['TruePosHealthState']
testSpecifications.drop(['Name', 'TruePosHealthState'], inplace=True, axis=1)
testSpecifications = testSpecifications.to_numpy()
name = name.to_numpy()
truePosHealthState = truePosHealthState.to_numpy()
return testSpecifications, name, truePosHealthState
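# Illustrative check of the returned objects (shapes follow directly from the construction above):
# 3 test types x nHS health states -> 3*nHS rows; the remaining numeric columns are
# OutputTestState, InputHealthState, FalseNegativeRate and FalsePositiveRate.
_specs_example, _names_example, _truePos_example = inpFunc_testSpecifications()
assert _specs_example.shape == (3 * nHS, 4)
assert _names_example.shape == (3 * nHS,) and _truePos_example.shape == (3 * nHS,)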
def trFunc_testCapacity(
realTime, # time within simulation (day)
# PCR capacity - initial
testCapacity_pcr_phe_total = 1e4,
testCapacity_pcr_phe_inflexday = pd.to_datetime("2020-03-25", format="%Y-%m-%d"),
testCapacity_pcr_phe_inflexslope = 5.,
# PCR capacity - increased
testCapacity_pcr_country_total = 1e5,
testCapacity_pcr_country_inflexday = pd.to_datetime("2020-04-25", format="%Y-%m-%d"),
testCapacity_pcr_country_inflexslope = 10,
# Antibody / antigen capacity
testCapacity_antibody_country_firstday = pd.to_datetime("2020-04-25", format="%Y-%m-%d"),
testCapacity_antibody_country_total = 5e6,
testCapacity_antibody_country_inflexday = pd.to_datetime("2020-05-20", format="%Y-%m-%d"),
testCapacity_antibody_country_inflexslope = 20,
testCapacity_antigenratio_country = 0.7,
**kwargs
):
# Returns a dictionary with test names and number available at day "t"
outPCR = (
#phe phase
testCapacity_pcr_phe_total * expit((realTime-testCapacity_pcr_phe_inflexday).days/testCapacity_pcr_phe_inflexslope)
+
#whole country phase
testCapacity_pcr_country_total * expit((realTime-testCapacity_pcr_country_inflexday).days/testCapacity_pcr_country_inflexslope)
)
if realTime<testCapacity_antibody_country_firstday:
outAntiTotal = 0.
else:
outAntiTotal = (
testCapacity_antibody_country_total * expit((realTime-testCapacity_antibody_country_inflexday).days/testCapacity_antibody_country_inflexslope)
)
return {
"PCR": outPCR,
"Antigen": outAntiTotal*testCapacity_antigenratio_country,
"Antibody": outAntiTotal*(1-testCapacity_antigenratio_country)
}
# To test the function (used by runtests.jl):
py_rTime = pd.to_datetime("2020-05-25", format="%Y-%m-%d")
__trFunc_testCapacity = trFunc_testCapacity(py_rTime)
# PARAMETER DICTIONARIES AND TABLES
# -----------------------------------------------------------------------------------------
# Build the nested parameter/computation graph of a single function.
def build_paramDict(cur_func):
"""
This function iterates through all inputs of a function,
and saves the default argument names and values into a dictionary.
If any of the default arguments are functions themselves, then recursively (depth-first) adds an extra field to
the dictionary, named <funcName + "_params">, that contains its inputs and arguments.
The output of this function can then be passed as a "kwargs" object to the highest level function,
which will then pass the parameter values to the lower dictionary levels appropriately
"""
paramDict = OrderedDict()
allArgs = inspect.getfullargspec(cur_func)
# Check if there are any default parameters, if no, just return empty dict
if allArgs.defaults is None:
return paramDict
for argname, argval in zip(allArgs.args[-len(allArgs.defaults):], allArgs.defaults):
# Save the default argument
paramDict[argname] = argval
# If the default argument is a function, inspect it for further
if callable(argval):
# print(argname)
paramDict[argname+"_params"] = build_paramDict(argval)
return paramDict
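# Illustrative usage (toy functions, not model parameters): callable defaults are expanded
# depth-first into an "<argname>_params" sub-dictionary, as described in the docstring above.
def _toy_inner(scale=2.0):
    return scale
def _toy_outer(x=1.0, inner_func=_toy_inner):
    return x * inner_func()
_toy_paramDict = build_paramDict(_toy_outer)
# -> OrderedDict([('x', 1.0),
#                 ('inner_func', <function _toy_inner>),
#                 ('inner_func_params', OrderedDict([('scale', 2.0)]))])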
# Function that computes the right side of the non-lin model ODE
def dydt_Complete(t,
stateTensor_flattened, # Might be double the normal size (as first dimension) _withNewOnlyCopy, if debugReturnNewPerDay
realStartDate = pd.to_datetime("2020-02-20", format="%Y-%m-%d"),
# debug
debugTransition = False,
debugTimestep = False,
debugReturnNewPerDay = True, # Now implemented by default into state iteration
# Dimensions
nAge=nAge, nHS=nHS, nI=nI, nR=nR, nIso=nIso, nTest=nTest,
# Input functions and tensors
# ----------------------------
# Health state updates
trFunc_diseaseProgression = trFunc_diseaseProgression,
trFunc_newInfections = trFunc_newInfections_Complete,
# Initial incoming travel-based infections (before restrictions)
trFunc_travelInfectionRate_ageAdjusted = trFunc_travelInfectionRate_ageAdjusted,
# Hospitalisation and recovery
trFunc_HospitalAdmission = trFunc_HospitalAdmission,
trFunc_HospitalDischarge = trFunc_HospitalDischarge,
# Policy changes (on social distancing for now) (TODO - possibly make more changes)
tStartSocialDistancing = pd.to_datetime("2020-03-23", format="%Y-%m-%d"),
tStopSocialDistancing = pd.to_datetime("2025-03-23", format="%Y-%m-%d"),
tStartImmunityPassports = pd.to_datetime("2025-03-23", format="%Y-%m-%d"),
tStopImmunityPassports = pd.to_datetime("2025-03-23", format="%Y-%m-%d"),
tStartQuarantineCaseIsolation = pd.to_datetime("2025-03-23", format="%Y-%m-%d"),
tStopQuarantineCaseIsolation = | pd.to_datetime("2025-03-23", format="%Y-%m-%d") | pandas.to_datetime |
# -*- coding: utf-8 -*-
import sys
import os
from pandas.io import pickle
# import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
@component(
# base_image='gcr.io/dots-stock/py38-pandas-cal',
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def set_defaults()-> NamedTuple(
'Outputs',
[
('date_ref',str),
('n_days', int),
('period_extra', int)
]):
import pandas as pd
from trading_calendars import get_calendar
today = pd.Timestamp.now('Asia/Seoul').strftime('%Y%m%d')
today = '20210903'  # NOTE: hard-coded reference date overrides the computed 'today' above; remove to use the live date
period_to_train = 20
period_extra = 100
n_days = period_to_train + period_extra
cal_KRX = get_calendar('XKRX')
def get_krx_on_dates_start_end(start, end):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(start=start,
end=end, freq='C',
holidays=cal_KRX.precomputed_holidays)
]
print(f'today : {today}')
dates_krx_on = get_krx_on_dates_start_end('20210104', today)
if today in dates_krx_on :
date_ref = today
else :
date_ref = dates_krx_on[-1]
return (date_ref, n_days, period_extra)
##############################
# get market info ############
##############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_market_info(
market_info_dataset: Output[Dataset],
date_ref: str,
n_days: int
):
import pandas as pd
import pickle
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
from sqlalchemy import create_engine
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
AWS_DB_ID = 'gb_master'
AWS_DB_PWD = '<PASSWORD>'
AWS_DB_ADDRESS = 'kwdb-daily.cf6e7v8fhede.ap-northeast-2.rds.amazonaws.com'
AWS_DB_PORT = '3306'
DB_DATABASE_NAME_daily_naver = 'daily_naver'
db_daily_naver_con = create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'
.format(AWS_DB_ID, AWS_DB_PWD, AWS_DB_ADDRESS, AWS_DB_PORT, DB_DATABASE_NAME_daily_naver),
encoding='utf8',
echo=False)
def get_market_from_naver_aws(date_ref):
'''
Fetch the parsed rows as-is from the daily_naver DB
'''
with db_daily_naver_con.connect() as conn:
table_name = f'{date_ref}_daily_allstock_naver'
str_sql = f'select * from {table_name} order by 등락률 DESC'
df = pd.read_sql_query(str_sql, conn) # self.get_db_daily_naver_con())
df = df.reset_index().rename(columns={'index':'순위_상승률', 'N':'순위_시가총액'})
df['순위_상승률'] = df.순위_상승률 + 1
return df
def get_krx_on_dates_n_days_ago(date_ref, n_days):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_markets_aws(date_ref, n_days):
dates_n_days_ago = get_krx_on_dates_n_days_ago(date_ref, n_days)
df_market = pd.DataFrame()
for date in dates_n_days_ago:
df_ = get_market_from_naver_aws(date)
logger.debug(f'date : {date} and df_.shape {df_.shape}' )
df_market = df_market.append(df_)
return df_market
df_market = get_markets_aws(date_ref=date_ref, n_days=n_days)
df_market.to_pickle(market_info_dataset.path, protocol=4)
#######################
# get bros ############
#######################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_bros(
date_ref: str,
n_days: int,
bros_univ_dataset: Output[Dataset]
):
import pandas as pd
import pickle
import pandas_gbq
import networkx as nx
from trading_calendars import get_calendar
cal_KRX = get_calendar('XKRX')
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_corr_pairs_gbq(date_ref, period):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
sql = f'''
SELECT
DISTINCT source,
target,
corr_value,
period,
date
FROM
`dots-stock.krx_dataset.corr_ohlc_part1`
WHERE
date = "{date_ref_}"
AND period = {period}
ORDER BY
corr_value DESC
LIMIT
1000'''
PROJECT_ID = 'dots-stock'
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID)
return df
def find_bros(date_ref, period):
'''clique over 3 nodes '''
df_edgelist = get_corr_pairs_gbq(date_ref, period)
g = nx.from_pandas_edgelist(df_edgelist, edge_attr=True)
bros_ = nx.find_cliques(g)
bros_3 = [bros for bros in bros_ if len(bros) >=3]
set_bros = set([i for l_i in bros_3 for i in l_i])
g_gang = g.subgraph(set_bros)
df_gangs_edgelist = nx.to_pandas_edgelist(g_gang)
return df_gangs_edgelist
def find_gang(date_ref):
df_gang = pd.DataFrame()
for period in [20, 40, 60, 90, 120]:
df_ = find_bros(date_ref, period=period)
df_gang = df_gang.append(df_)
return df_gang
# jobs
dates = get_krx_on_dates_n_days_ago(date_ref=date_ref, n_days=n_days)
df_bros = pd.DataFrame()
for date in dates:
df = find_gang(date_ref=date)
df_bros = df_bros.append(df)
df_bros.to_pickle(bros_univ_dataset.path, protocol=4)
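# Toy sketch of the clique filter used in find_bros above. Illustrative only: synthetic
# edge list, no BigQuery; tickers that sit in a correlation clique of size >= 3 form the
# "bros" universe.
def _bros_clique_toy_example():
    import networkx as nx
    import pandas as pd
    edges = pd.DataFrame({"source": ["A", "A", "B", "C"],
                          "target": ["B", "C", "C", "D"]})
    g = nx.from_pandas_edgelist(edges)
    cliques3 = [c for c in nx.find_cliques(g) if len(c) >= 3]   # [['A', 'B', 'C']] up to ordering
    return nx.to_pandas_edgelist(g.subgraph({n for c in cliques3 for n in c}))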
###############################
# get adj price############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_adj_prices(
start_index :int,
end_index : int,
market_info_dataset: Input[Dataset],
adj_price_dataset: Output[Dataset]
):
# import json
import FinanceDataReader as fdr
from ae_module.ae_logger import ae_log
import pandas as pd
df_market = pd.read_pickle(market_info_dataset.path)
date_ref = df_market.날짜.max()
date_start = '20210101'
codes_stock = df_market[df_market.날짜 == date_ref].종목코드.to_list()
def get_price_adj(code, start, end):
return fdr.DataReader(code, start=start, end=end)
def get_price(l_univ, date_start, date_end):
df_price = pd.DataFrame()
for code in l_univ :
df_ = get_price_adj(code, date_start, date_end)
print('size', df_.shape)
df_['code'] = str(code)
df_price = df_price.append(df_)
return df_price
codes = codes_stock[ start_index : end_index ]
ae_log.debug(f'codes_stock {codes.__len__()}')
df_adj_price = get_price(codes, date_start=date_start, date_end=date_ref)
df_adj_price = df_adj_price.reset_index()
print('df_adj_cols =>', df_adj_price.columns)
df_adj_price.to_pickle(adj_price_dataset.path, protocol=4)
ae_log.debug(df_adj_price.shape)
###############################
# get full adj ############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
# packages_to_install=['pandas']
)
def get_full_adj_prices(
adj_price_dataset01: Input[Dataset],
adj_price_dataset02: Input[Dataset],
adj_price_dataset03: Input[Dataset],
adj_price_dataset04: Input[Dataset],
adj_price_dataset05: Input[Dataset],
full_adj_prices_dataset: Output[Dataset]
):
import pandas as pd
df_adj_price_01 = pd.read_pickle(adj_price_dataset01.path)
df_adj_price_02 = pd.read_pickle(adj_price_dataset02.path)
df_adj_price_03 = pd.read_pickle(adj_price_dataset03.path)
df_adj_price_04 = pd.read_pickle(adj_price_dataset04.path)
df_adj_price_05 = pd.read_pickle(adj_price_dataset05.path)
df_full_adj_prices = pd.concat([df_adj_price_01,
df_adj_price_02,
df_adj_price_03,
df_adj_price_04,
df_adj_price_05])
# df_full_adj_prices.to_csv(full_adj_prices_dataset.path)
df_full_adj_prices.to_pickle(full_adj_prices_dataset.path, protocol=4)
# with open(full_adj_prices_dataset.path, 'wb') as f:
# pickle.dump(df_full_adj_prices, f)
###############################
# get target ############
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# base_image="amancevice/pandas:1.3.2-slim"
)
def get_target(
df_price_dataset: Input[Dataset],
df_target_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
def make_target(df):
df_ = df.copy()
df_.sort_values(by='date', inplace=True)
df_['high_p1'] = df_.high.shift(-1)
df_['high_p2'] = df_.high.shift(-2)
df_['high_p3'] = df_.high.shift(-3)
df_['close_p1'] = df_.close.shift(-1)
df_['close_p2'] = df_.close.shift(-2)
df_['close_p3'] = df_.close.shift(-3)
df_['change_p1'] = (df_.close_p1 - df_.close) / df_.close
df_['change_p2'] = (df_.close_p2 - df_.close) / df_.close
df_['change_p3'] = (df_.close_p3 - df_.close) / df_.close
df_['change_p1_over5'] = df_['change_p1'] > 0.05
df_['change_p2_over5'] = df_['change_p2'] > 0.05
df_['change_p3_over5'] = df_['change_p3'] > 0.05
df_['change_p1_over10'] = df_['change_p1'] > 0.1
df_['change_p2_over10'] = df_['change_p2'] > 0.1
df_['change_p3_over10'] = df_['change_p3'] > 0.1
df_['close_high_1'] = (df_.high_p1 - df_.close) / df_.close
df_['close_high_2'] = (df_.high_p2 - df_.close) / df_.close
df_['close_high_3'] = (df_.high_p3 - df_.close) / df_.close
df_['close_high_1_over10'] = df_['close_high_1'] > 0.1
df_['close_high_2_over10'] = df_['close_high_2'] > 0.1
df_['close_high_3_over10'] = df_['close_high_3'] > 0.1
df_['close_high_1_over5'] = df_['close_high_1'] > 0.05
df_['close_high_2_over5'] = df_['close_high_2'] > 0.05
df_['close_high_3_over5'] = df_['close_high_3'] > 0.05
df_['target_over10'] = np.logical_or.reduce([
df_.close_high_1_over10,
df_.close_high_2_over10,
df_.close_high_3_over10])
df_['target_over5'] = np.logical_or.reduce([
df_.close_high_1_over5,
df_.close_high_2_over5,
df_.close_high_3_over5])
df_['target_close_over_10'] = np.logical_or.reduce([
df_.change_p1_over10,
df_.change_p2_over10,
df_.change_p3_over10])
df_['target_close_over_5'] = np.logical_or.reduce([
df_.change_p1_over5,
df_.change_p2_over5,
df_.change_p3_over5])
df_['target_mclass_close_over10_under5'] = \
np.where(df_['change_p1'] > 0.1,
1, np.where(df_['change_p1'] > -0.05, 0, -1))
df_['target_mclass_close_p2_over10_under5'] = \
np.where(df_['change_p2'] > 0.1,
1, np.where(df_['change_p2'] > -0.05, 0, -1))
df_['target_mclass_close_p3_over10_under5'] = \
np.where(df_['change_p3'] > 0.1,
1, np.where(df_['change_p3'] > -0.05, 0, -1))
df_.dropna(subset=['high_p3'], inplace=True)
return df_
def get_target_df(df_price):
df_price.reset_index(inplace=True)
df_price.columns = df_price.columns.str.lower()
df_target = df_price.groupby('code').apply(lambda df: make_target(df))
df_target = df_target.reset_index(drop=True)
# df_target['date'] = df_target.date.str.replace('-', '')
return df_target
# df_price = pd.read_csv(df_price_dataset.path, index_col=0)
df_price = pd.read_pickle(df_price_dataset.path)
# with open(df_price_dataset.path, 'rb') as f:
# df_price = pickle.load(f)
print('df cols =>', df_price.columns)
df_target = get_target_df(df_price=df_price)
df_target.to_pickle(df_target_dataset.path, protocol=4)
###############################
# get tech indicator ##########
###############################
@component(
# base_image="gcr.io/dots-stock/py38-pandas-cal",
base_image="gcr.io/dots-stock/python-img-v5.2",
packages_to_install=["stockstats", "scikit-learn"]
)
def get_tech_indi(
# date_ref: str,
df_price_dataset: Input[Dataset],
df_techini_dataset: Output[Dataset],
):
from stockstats import StockDataFrame as Sdf
# from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import maxabs_scale
import pandas as pd
import pickle
class FeatureEngineer:
"""Provides methods for preprocessing the stock price data
Attributes
----------
use_technical_indicator : boolean
use technical indicators or not
tech_indicator_list : list
a list of technical indicator names (modified from config.py)
use_turbulence : boolean
use turbulence index or not
user_defined_feature : boolean
use user defined features or not
Methods
-------
preprocess_data()
main method to do the feature engineering
"""
TECHNICAL_INDICATORS_LIST = ['macd',
'boll_ub',
'boll_lb',
'rsi_30',
'dx_30',
'close_30_sma',
'close_60_sma',
# 'mfi',
]
# PERIOD_MAX = 60,
def __init__(
self,
use_technical_indicator=True,
tech_indicator_list=TECHNICAL_INDICATORS_LIST,
user_defined_feature=False,
):
self.use_technical_indicator = use_technical_indicator
self.tech_indicator_list = tech_indicator_list
self.user_defined_feature = user_defined_feature
def preprocess_data(self, df):
"""main method to do the feature engineering
@:param df: source dataframe
@:return: a pandas dataframe with features added
"""
#clean data
# df = self.clean_data(df)
# add technical indicators using stockstats
if self.use_technical_indicator == True:
df = self.add_technical_indicator(df)
print("Successfully added technical indicators")
# add user defined feature
if self.user_defined_feature == True:
df = self.add_user_defined_feature(df)
print("Successfully added user defined features")
# fill the missing values at the beginning and the end
df = df.fillna(method="bfill").fillna(method="ffill")
return df
def clean_data(self, data):
"""
clean the raw data
deal with missing values
reasons: stocks could be delisted, not incorporated at the time step
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df=df.sort_values(['date','tic'],ignore_index=True) ##
df.index = df.date.factorize()[0]
merged_closes = df.pivot_table(index = 'date',columns = 'tic', values = 'close')
merged_closes = merged_closes.dropna(axis=1)
tics = merged_closes.columns
df = df[df.tic.isin(tics)]
return df
def add_technical_indicator(self, data):
"""
calculate technical indicators
use stockstats package to add technical indicators
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
stock = Sdf.retype(df.copy())
unique_ticker = stock.tic.unique()
for indicator in self.tech_indicator_list:
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)
temp_indicator['tic'] = unique_ticker[i]
temp_indicator['date'] = df[df.tic == unique_ticker[i]]['date'].to_list()
indicator_df = indicator_df.append(
temp_indicator, ignore_index=True
)
except Exception as e:
print(e)
df = df.merge(indicator_df[['tic','date',indicator]],on=['tic','date'],how='left')
df = df.sort_values(by=['date','tic'])
return df
def add_user_defined_feature(self, data):
"""
add user defined features
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
df["daily_return"] = df.groupby('tic').close.pct_change(1)
df['return_lag_1']=df.groupby('tic').close.pct_change(2)
df['return_lag_2']=df.groupby('tic').close.pct_change(3)
df['return_lag_3']=df.groupby('tic').close.pct_change(4)
# bollinger band - relative
df['bb_u_ratio'] = df.boll_ub / df.close # without groupby
df['bb_l_ratio'] = df.boll_lb / df.close # don't need groupby
# oh oc ol ratio
df['oh_ratio'] = (df.high - df.open) / df.open
df['oc_ratio'] = (df.close - df.open) / df.open
df['ol_ratio'] = (df.low - df.open) / df.open
# macd - relative
df['max_scale_MACD'] = df.groupby('tic').macd.transform(
lambda x: maxabs_scale(x))
# custom volume indicator
def volume_change_wrt_10_max(df):
return df.volume / df.volume.rolling(10).max()
def volume_change_wrt_10_mean(df):
return df.volume / df.volume.rolling(10).mean()
df['volume_change_wrt_10max'] = (
df.groupby('tic')
.apply(lambda df: volume_change_wrt_10_max(df))
.reset_index(drop=True)
)
df['volume_change_wrt_10mean'] = (
df.groupby('tic')
.apply(lambda df: volume_change_wrt_10_mean(df))
.reset_index(drop=True)
)
# close ratio rolling min max
def close_ratio_wrt_10_max(df):
return df.close / df.close.rolling(10).max()
def close_ratio_wrt_10_min(df):
return df.close / df.close.rolling(10).min()
df['close_ratio_wrt_10max'] = (
df.groupby('tic')
.apply(lambda df: close_ratio_wrt_10_max(df))
.reset_index(drop=True)
)
df['close_ratio_wrt_10min'] = (
df.groupby('tic')
.apply(lambda df: close_ratio_wrt_10_min(df))
.reset_index(drop=True)
)
return df
df_price = pd.read_pickle(df_price_dataset.path)
print('size =>', df_price.shape)
print('cols =>', df_price.columns)
df_price.columns = df_price.columns.str.lower()
df_price.rename(columns={'code':'tic'}, inplace=True)
fe = FeatureEngineer(user_defined_feature=True)
df_process = fe.preprocess_data(df_price)
df_process.rename(columns={'tic':'code'}, inplace=True)
df_process.to_pickle(df_techini_dataset.path, protocol=4)
###############################
# get full tech indi ##########
###############################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
# packages_to_install=['pandas']
)
def get_full_tech_indi(
tech_indi_dataset01: Input[Dataset],
tech_indi_dataset02: Input[Dataset],
tech_indi_dataset03: Input[Dataset],
tech_indi_dataset04: Input[Dataset],
tech_indi_dataset05: Input[Dataset],
full_tech_indi_dataset: Output[Dataset]
):
import pandas as pd
df_01 = pd.read_pickle(tech_indi_dataset01.path)
df_02 = pd.read_pickle(tech_indi_dataset02.path)
df_03 = pd.read_pickle(tech_indi_dataset03.path)
df_04 = pd.read_pickle(tech_indi_dataset04.path)
df_05 = pd.read_pickle(tech_indi_dataset05.path)
df_full = pd.concat([df_01, df_02, df_03,df_04, df_05])
df_full.to_pickle(full_tech_indi_dataset.path, protocol=4)
#########################################
# get feature ###########################
#########################################
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_features(
market_info_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
features_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
from collections import Counter
df_market = pd.read_pickle(market_info_dataset.path)
# previous-day change rate (등락률 shifted by one trading day)
df_market = df_market.sort_values('날짜')
df_market['return_-1'] = df_market.groupby('종목코드').등락률.shift(1)
# load df_ed (bros edge list)
df_ed = | pd.read_pickle(bros_dataset.path) | pandas.read_pickle |
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = | DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2]) | pandas.DataFrame |
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
tm.assert_frame_equal(result, expected)
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-01-01")), df.iloc[[1, 7]])
tm.assert_frame_equal(grouped.get_group(Timestamp("2013-02-01")), df.iloc[[3, 5]])
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
nan_df = DataFrame(
{"nan": [np.nan, np.nan, np.nan], "nat": [pd.NaT, pd.NaT, pd.NaT]}
)
assert nan_df["nan"].dtype == "float64"
assert nan_df["nat"].dtype == "datetime64[ns]"
for key in ["nan", "nat"]:
grouped = nan_df.groupby(key)
assert grouped.groups == {}
assert grouped.ngroups == 0
assert grouped.indices == {}
with pytest.raises(KeyError, match=r"^nan$"):
grouped.get_group(np.nan)
with pytest.raises(KeyError, match=r"^NaT$"):
grouped.get_group(pd.NaT)
def test_groupby_2d_malformed():
d = DataFrame(index=range(2))
d["group"] = ["g1", "g2"]
d["zeros"] = [0, 0]
d["ones"] = [1, 1]
d["label"] = ["l1", "l2"]
tmp = d.groupby(["group"]).mean()
res_values = np.array([[0, 1], [0, 1]], dtype=np.int64)
tm.assert_index_equal(tmp.columns, Index(["zeros", "ones"]))
tm.assert_numpy_array_equal(tmp.values, res_values)
def test_int32_overflow():
B = np.concatenate((np.arange(10000), np.arange(10000), np.arange(5000)))
A = np.arange(25000)
df = DataFrame({"A": A, "B": B, "C": A, "D": B, "E": np.random.randn(25000)})
left = df.groupby(["A", "B", "C", "D"]).sum()
right = df.groupby(["D", "C", "B", "A"]).sum()
assert len(left) == len(right)
def test_groupby_sort_multi():
df = DataFrame(
{
"a": ["foo", "bar", "baz"],
"b": [3, 2, 1],
"c": [0, 1, 2],
"d": np.random.randn(3),
}
)
tups = [tuple(row) for row in df[["a", "b", "c"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["a", "b", "c"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]])
tups = [tuple(row) for row in df[["c", "a", "b"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["c", "a", "b"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups)
tups = [tuple(x) for x in df[["b", "c", "a"]].values]
tups = com.asarray_tuplesafe(tups)
result = df.groupby(["b", "c", "a"], sort=True).sum()
tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]])
df = DataFrame(
{"a": [0, 1, 2, 0, 1, 2], "b": [0, 0, 0, 1, 1, 1], "d": np.random.randn(6)}
)
grouped = df.groupby(["a", "b"])["d"]
result = grouped.sum()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = [tuple(row) for row in df[keys].values]
tups = com.asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in expected.items():
assert result[k] == v
_check_groupby(df, result, ["a", "b"], "d")
def test_dont_clobber_name_column():
df = DataFrame(
{"key": ["a", "a", "a", "b", "b", "b"], "name": ["foo", "bar", "baz"] * 2}
)
result = df.groupby("key").apply(lambda x: x)
tm.assert_frame_equal(result, df)
def test_skip_group_keys():
tsf = tm.makeTimeDataFrame()
grouped = tsf.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values(by="A")[:3])
pieces = [group.sort_values(by="A")[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_frame_equal(result, expected)
grouped = tsf["A"].groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(lambda x: x.sort_values()[:3])
pieces = [group.sort_values()[:3] for key, group in grouped]
expected = pd.concat(pieces)
tm.assert_series_equal(result, expected)
def test_no_nonsense_name(float_frame):
# GH #995
s = float_frame["C"].copy()
s.name = None
result = s.groupby(float_frame["A"]).agg(np.sum)
assert result.name is None
def test_multifunc_sum_bug():
# GH #1065
x = DataFrame(np.arange(9).reshape(3, 3))
x["test"] = 0
x["fl"] = [1.3, 1.5, 1.6]
grouped = x.groupby("test")
result = grouped.agg({"fl": "sum", 2: "size"})
assert result["fl"].dtype == np.float64
def test_handle_dict_return_value(df):
def f(group):
return {"max": group.max(), "min": group.min()}
def g(group):
return Series({"max": group.max(), "min": group.min()})
result = df.groupby("A")["C"].apply(f)
expected = df.groupby("A")["C"].apply(g)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grouper", ["A", ["A", "B"]])
def test_set_group_name(df, grouper):
def f(group):
assert group.name is not None
return group
def freduce(group):
assert group.name is not None
return group.sum()
def foo(x):
return freduce(x)
grouped = df.groupby(grouper)
# make sure all these work
grouped.apply(f)
grouped.aggregate(freduce)
grouped.aggregate({"C": freduce, "D": freduce})
grouped.transform(f)
grouped["C"].apply(f)
grouped["C"].aggregate(freduce)
grouped["C"].aggregate([freduce, foo])
grouped["C"].transform(f)
def test_group_name_available_in_inference_pass():
# gh-15062
df = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": np.arange(6)})
names = []
def f(group):
names.append(group.name)
return group.copy()
df.groupby("a", sort=False, group_keys=False).apply(f)
expected_names = [0, 1, 2]
assert names == expected_names
def test_no_dummy_key_names(df):
# see gh-1291
result = df.groupby(df["A"].values).sum()
assert result.index.name is None
result = df.groupby([df["A"].values, df["B"].values]).sum()
assert result.index.names == (None, None)
def test_groupby_sort_multiindex_series():
# series multiindex groupby sort argument was not being passed through
# _compress_group_index
# GH 9444
index = MultiIndex(
levels=[[1, 2], [1, 2]],
codes=[[0, 0, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0]],
names=["a", "b"],
)
mseries = Series([0, 1, 2, 3, 4, 5], index=index)
index = MultiIndex(
levels=[[1, 2], [1, 2]], codes=[[0, 0, 1], [1, 0, 0]], names=["a", "b"]
)
mseries_result = Series([0, 2, 4], index=index)
result = mseries.groupby(level=["a", "b"], sort=False).first()
tm.assert_series_equal(result, mseries_result)
result = mseries.groupby(level=["a", "b"], sort=True).first()
tm.assert_series_equal(result, mseries_result.sort_index())
def test_groupby_reindex_inside_function():
periods = 1000
ind = date_range(start="2012/1/1", freq="5min", periods=periods)
df = DataFrame({"high": np.arange(periods), "low": np.arange(periods)}, index=ind)
def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
"""
def _func(data):
d = data.loc[data.index.map(lambda x: x.hour < 11)].dropna()
if fix:
data[data.index[0]]
if len(d) == 0:
return None
return func(d)
return _func
def afunc(data):
d = data.select(lambda x: x.hour < 11).dropna()
return np.max(d)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
closure_bad = grouped.agg({"high": agg_before(11, np.max)})
closure_good = grouped.agg({"high": agg_before(11, np.max, True)})
tm.assert_frame_equal(closure_bad, closure_good)
def test_groupby_multiindex_missing_pair():
# GH9049
df = DataFrame(
{
"group1": ["a", "a", "a", "b"],
"group2": ["c", "c", "d", "c"],
"value": [1, 1, 1, 5],
}
)
df = df.set_index(["group1", "group2"])
df_grouped = df.groupby(level=["group1", "group2"], sort=True)
res = df_grouped.agg("sum")
idx = MultiIndex.from_tuples(
[("a", "c"), ("a", "d"), ("b", "c")], names=["group1", "group2"]
)
exp = DataFrame([[2], [1], [5]], index=idx, columns=["value"])
tm.assert_frame_equal(res, exp)
def test_groupby_multiindex_not_lexsorted():
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[("a", ""), ("b1", "c1"), ("b2", "c2")], names=["b", "c"]
)
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
not_lexsorted_df = not_lexsorted_df.pivot_table(
index="a", columns=["b", "c"], values="d"
)
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.groupby("a").mean()
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.groupby("a").mean()
tm.assert_frame_equal(expected, result)
# a transforming function should work regardless of sort
# GH 14776
df = DataFrame(
{"x": ["a", "a", "b", "a"], "y": [1, 1, 2, 2], "z": [1, 2, 3, 4]}
).set_index(["x", "y"])
assert not df.index.is_lexsorted()
for level in [0, 1, [0, 1]]:
for sort in [False, True]:
result = df.groupby(level=level, sort=sort).apply(DataFrame.drop_duplicates)
expected = df
tm.assert_frame_equal(expected, result)
result = (
df.sort_index()
.groupby(level=level, sort=sort)
.apply(DataFrame.drop_duplicates)
)
expected = df.sort_index()
tm.assert_frame_equal(expected, result)
def test_index_label_overlaps_location():
# checking we don't have any label/location confusion in
# the wake of GH5375
df = DataFrame(list("ABCDE"), index=[2, 0, 2, 1, 1])
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
# ... and again, with a generic Index of floats
df.index = df.index.astype(float)
g = df.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = df.iloc[[1, 3, 4]]
tm.assert_frame_equal(actual, expected)
ser = df[0]
g = ser.groupby(list("ababb"))
actual = g.filter(lambda x: len(x) > 2)
expected = ser.take([1, 3, 4])
tm.assert_series_equal(actual, expected)
def test_transform_doesnt_clobber_ints():
# GH 7972
n = 6
x = np.arange(n)
df = DataFrame({"a": x // 2, "b": 2.0 * x, "c": 3.0 * x})
df2 = DataFrame({"a": x // 2 * 1.0, "b": 2.0 * x, "c": 3.0 * x})
gb = df.groupby("a")
result = gb.transform("mean")
gb2 = df2.groupby("a")
expected = gb2.transform("mean")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sort_column",
["ints", "floats", "strings", ["ints", "floats"], ["ints", "strings"]],
)
@pytest.mark.parametrize(
"group_column", ["int_groups", "string_groups", ["int_groups", "string_groups"]]
)
def test_groupby_preserves_sort(sort_column, group_column):
# Test to ensure that groupby always preserves sort order of original
# object. Issue #8588 and #9651
df = DataFrame(
{
"int_groups": [3, 1, 0, 1, 0, 3, 3, 3],
"string_groups": ["z", "a", "z", "a", "a", "g", "g", "g"],
"ints": [8, 7, 4, 5, 2, 9, 1, 1],
"floats": [2.3, 5.3, 6.2, -2.4, 2.2, 1.1, 1.1, 5],
"strings": ["z", "d", "a", "e", "word", "word2", "42", "47"],
}
)
# Try sorting on different types and with different group types
df = df.sort_values(by=sort_column)
g = df.groupby(group_column)
def test_sort(x):
tm.assert_frame_equal(x, x.sort_values(by=sort_column))
g.apply(test_sort)
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)[["Z"]]
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
import numpy as np
import operator
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
import math
from tkinter import *
# Functions age_encode, race_encode, state_encode, hispanic_encode, and self_core_dict used to create the core dict.
def age_encode(age):
# Return the UTF-8 encoded age-bracket label (bytes) for an integer age.
if age < 1:
return '1'.encode('utf-8')
elif age < 5:
return '1-4'.encode('utf-8')
elif age < 10:
return '5-9'.encode('utf-8')
elif age < 15:
return '10-14'.encode('utf-8')
elif age < 20:
return '15-19'.encode('utf-8')
elif age < 25:
return '20-24'.encode('utf-8')
elif age < 30:
return '25-29'.encode('utf-8')
elif age < 35:
return '30-34'.encode('utf-8')
elif age < 40:
return '35-39'.encode('utf-8')
elif age < 45:
return '40-44'.encode('utf-8')
elif age < 50:
return '45-49'.encode('utf-8')
elif age < 55:
return '50-54'.encode('utf-8')
elif age < 60:
return '55-59'.encode('utf-8')
elif age < 65:
return '60-64'.encode('utf-8')
elif age < 70:
return '65-69'.encode('utf-8')
elif age < 75:
return '70-74'.encode('utf-8')
elif age < 80:
return '75-79'.encode('utf-8')
elif age < 85:
return '80-84'.encode('utf-8')
elif age < 90:
return '85-89'.encode('utf-8')
elif age < 95:
return '90-94'.encode('utf-8')
elif age < 100:
return '95-99'.encode('utf-8')
elif age >= 100:
return '100+'.encode('utf-8')
else:
print('Insert a valid age (brackets run from <1 to 100+).')
return
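# Example (illustrative): age_encode(37) -> b'35-39', age_encode(102) -> b'100+'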
def race_encode(race):
# Insert full name string, return utf-8 object of race code.
race_key = {'White': '2106-3'.encode('utf-8'),
'Asian or Pacific Islander': 'A-PI'.encode('utf-8'),
'Black or African American': '2054-5'.encode('utf-8'),
'American Indian or Alaska Native': '1002-5'.encode('utf-8')}
if race not in race_key.keys():
raise KeyError("%s not present" %race)
else:
return race_key[race]
def state_encode(state):
state_dict = {'Alabama': 1,
'Alaska': 2,
'Arizona': 4,
'Arkansas': 5,
'California': 6,
'Colorado': 8,
'Connecticut': 9,
'Delaware': 10,
'District of Columbia': 11,
'Florida': 12,
'Georgia': 13,
'Hawaii': 15,
'Idaho': 16,
'Illinois': 17,
'Indiana': 18,
'Iowa': 19,
'Kansas': 20,
'Kentucky': 21,
'Louisiana': 22,
'Maine': 23,
'Maryland': 24,
'Massachusetts': 25,
'Michigan': 26,
'Minnesota': 27,
'Mississippi': 28,
'Missouri': 29,
'Montana': 30,
'Nebraska': 31,
'Nevada': 32,
'New Hampshire': 33,
'New Jersey': 34,
'New Mexico': 35,
'New York': 36,
'North Carolina': 37,
'North Dakota': 38,
'Ohio': 39,
'Oklahoma': 40,
'Oregon': 41,
'Pennsylvania': 42,
'Rhode Island': 44,
'South Carolina': 45,
'South Dakota': 46,
'Tennessee': 47,
'Texas': 48,
'Utah': 49,
'Vermont': 50,
'Virginia': 51,
'Washington': 53,
'West Virginia': 54,
'Wisconsin': 55,
'Wyoming': 56}
if state not in state_dict.keys():
raise KeyError('%s not in states' % state)
else:
return state_dict[state]
def hispanic_encode(hispanic):
hispanic_key = {'Not Hispanic': '2186-2'.encode('utf-8'),
'Hispanic': '2135-2'.encode('utf-8'),
'Unspecific': 'NS'.encode('utf-8')}
if hispanic not in hispanic_key.keys():
raise KeyError("%s not present" % hispanic)
else:
return hispanic_key[hispanic]
def self_core_dict(age, race, gender, hispanic, state):
# Produces a dictionary of the person's stats for numpy manipulation.
tester = {}
tester.update({'age': age_encode(age)})
tester.update({'race': race_encode(race)})
tester.update({'gender': gender.encode('utf-8')})
tester.update({'hispanic': hispanic_encode(hispanic)})
tester.update({'state': str(state_encode(state)).encode('utf-8')})
return tester
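# Example (illustrative; the gender and state values below are assumptions):
# person = self_core_dict(34, 'White', 'F', 'Not Hispanic', 'Minnesota')
# -> {'age': b'30-34', 'race': b'2106-3', 'gender': b'F',
#     'hispanic': b'2186-2', 'state': b'27'}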
# Functions age_range_encode, mortality_core_raw used to create the total mortality matrix for the core.
def age_range_encode(age):
#ages = ['<1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
# '55-59', '60-64', '65-69', '70-74', '75-79', '80-84']
ages = ['1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+']
byte_ages = [x.encode('utf-8') for x in ages]
return byte_ages[byte_ages.index(age):]
def mortality_core_raw(person_dict, age_range):
# Imports CDC mortality and 85~100+ population data.
mortality_path = r'C:\Users\Amy\Desktop\Research\data\070617_113_causeofdeath_cancer.txt'
mortality_data = np.genfromtxt(mortality_path,
dtype=(object, object, object, object, object, object, object, '<i8', '<i8'),
delimiter='\t',
names=True)
pop_85_path = r'C:\Users\Amy\Desktop\Research\data\85to100_estimates_final.txt'
pop_85_data = np.genfromtxt(pop_85_path,
dtype=(object, object, object, object, object, '<i8'),
delimiter='\t',
names=True)
pop_85_ages = ['85-89'.encode('utf-8'), '90-94'.encode('utf-8'), '95-99'.encode('utf-8'), '100+'.encode('utf-8')]
total_deaths_path = r'C:\Users\Amy\Desktop\Research\data\total_deaths.txt'
totald_data = np.genfromtxt(total_deaths_path,
dtype=(object, object, object, object, object, '<i8', '<i8'),
delimiter='\t',
names=True)
age_dict = {'85-89'.encode('utf-8'): 'A',
'90-94'.encode('utf-8'): 'B',
'95-99'.encode('utf-8'): 'C',
'100+'.encode('utf-8'): 'D'}
race_dict = {'2106-3'.encode('utf-8'): '1',
'1002-5'.encode('utf-8'): '2',
'2054-5'.encode('utf-8'): '3',
'A-PI'.encode('utf-8'): '4'}
ethnicity_dict = {'2186-2'.encode('utf-8'): '0',
'2135-2'.encode('utf-8'): '1'}
population_dict = dict()
for entry in pop_85_data:
age = entry[0]
state = entry[1]
gender = entry[2]
race = entry[3]
eth = entry[4]
population = entry[5]
label = age_dict[age] + state.decode('utf-8') + gender.decode('utf-8') + race_dict[race] + ethnicity_dict[eth]
population_dict.update({label: population})
for entry in mortality_data:
age = entry[0]
ethnicity = entry[2]
if age in pop_85_ages and ethnicity != 'NS'.encode('utf-8'):
race = entry[1]
ethnicity = entry[2]
state = entry[3]
gender = entry[4]
label = age_dict[age] + state.decode('utf-8') + gender.decode('utf-8') + race_dict[race] + ethnicity_dict[
ethnicity]
entry[8] = population_dict[label]
# Produces the set of the person for comparison to mortality entries.
person_set = {person_dict['race'], person_dict['gender'], person_dict['hispanic'], person_dict['state']}
# Produces the dictionary of all deaths associated with the core by age.
total_deaths_all = {age: 0 for age in age_range}
for entry in totald_data:
age = entry[0]
deaths = entry[5]
population = entry[6]
if person_set.issubset(set(entry)) and age in age_range:
total_deaths_all.update({age: total_deaths_all[age] + deaths})
# Produces the list of sets of all mortalities associated with the core and total count of all deaths.
mortalities = []
total_deaths_selected = {age: 0 for age in age_range}
total_population_by_age = {age: 0 for age in age_range}
for row in mortality_data:
age = row[0]
mortality_name = row[5]
if person_set.issubset(set(row)) and age in age_range:
mortality_code = row[6]
deaths = row[7]
population = row[8]
rate = row[7] / row[8] * 100000
mortalities.append((age, mortality_name, mortality_code, deaths, population, rate))
total_deaths_selected.update({age: total_deaths_selected[age] + deaths})
total_population_by_age.update({age: population})
# Converts the result from list of sets to a matrix.
mortality_matches = np.array([tuple(x) for x in mortalities], dtype='object, object, object, <i8, <i8, <i8')
mortality_matches.reshape((len(mortality_matches), 1))
# Obtains list of unique mortalities.
mortality_names = set([i[1] for i in mortality_matches])
print('There are', len(mortality_names), 'total unique mortalities.', '\n')
if len(mortality_names) == 0:
print('Congrats! Not enough of you are dying. Perhaps try another state.')
return mortality_matches, mortality_names, total_deaths_selected, total_deaths_all, total_population_by_age
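# Example (illustrative): build the raw mortality matrix for the core dict
# returned by self_core_dict (the variable name `person` is hypothetical).
# ages = age_range_encode(person['age'])
# matches, names, deaths_sel, deaths_all, pop_by_age = mortality_core_raw(person, ages)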
# Function death_ranking used to create the top 12 mortality matrix.
def death_ranking(matches, names, cutoff_num):
scores = {name: 0 for name in names}
# Filters through all raw mortality rows to create a death score for each mortality.
for entry in matches:
current_disease = entry[1]
deaths = entry[3]
scores.update({current_disease: scores[current_disease] + deaths})
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
# Returns top cutoff number mortality entries if there are >cutoff_num death scores listed.
if len(sorted_scores) >= cutoff_num:
# Top cutoff_num scores and mortality names obtained.
trim_scores = sorted_scores[0:cutoff_num]
names = [entry[0] for entry in trim_scores]
# Finds which rows are not in the top cutoff_num mortalities and removes them. Returns the trimmed matrix.
to_delete = [i for i in range(len(matches)) if matches[i][1] not in names]
trimmed_matches = np.delete(matches, to_delete, axis=0)
return trimmed_matches, names
else:
names = [entry[0] for entry in sorted_scores]
return matches, names
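# Example (illustrative): trim to the 12 mortalities with the highest death counts.
# top_matches, top_names = death_ranking(matches, names, 12)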
# Functions age_bracket_avg, age_of_death, and stacked_bar_chart used for visualization.
def age_bracket_avg(person_dict, ages):
population_path = r'C:\Users\Amy\Desktop\Research\data\year_age_popestimate.txt'
population_data = np.genfromtxt(population_path,
dtype=('<i8', object, object, object, object, '<i8'),
delimiter='\t',
names=True)
population_dict = {age: np.array([0, 0, 0, 0, 0]) for age in ages}
person_set = {person_dict['race'], person_dict['gender'], person_dict['hispanic'], person_dict['state']}
ages = ['1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+']
byte_ages = [x.encode('utf-8') for x in ages]
age_min = (byte_ages.index(person_dict['age'])-1) * 5
for entry in population_data:
current_age = entry[0]
if person_set.issubset(entry) and current_age >= age_min:
age = entry[0]
age_bracket = byte_ages[age // 5 + 1]
age_bracket_year = age % 5
population = entry[5]
population_dict[age_bracket][age_bracket_year] = population
for age, counts in population_dict.items():
tens = (byte_ages.index(age) - 1) // 2 * 10 + (byte_ages.index(age) - 1) % 2 * 5
dists = counts/sum(counts)
avg = np.dot(dists, [0, 1, 2, 3, 4])
population_dict.update({age: round((tens + avg), 2)})
return population_dict
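# (age_bracket_avg returns, for each age bracket passed in, the population-weighted
#  average age within that bracket, e.g. {b'35-39': 37.12, ...} -- the numeric
#  value shown is illustrative only.)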
def age_of_death(matches, names, total_deaths_all, age_avgs, just_mortalities):
age_list = list(age_avgs.keys())
names_path = r'C:\Users\Amy\Desktop\Research\data\070617_113_listofdeaths.txt'
names_data = np.genfromtxt(names_path,
dtype=(object, object),
delimiter='\t',
names=True)
names_dict = {row[0]: row[1] for row in names_data}
if just_mortalities:
mortality_counts = {name: {age: 0 for age in age_list} for name in names}
mortality_results = {}
for entry in matches:
age = entry[0]
name = entry[1]
deaths = entry[3]
mortality_counts[name].update({age: deaths})
for name, ages in mortality_counts.items():
counts = np.array(list(ages.values()))
indices = list(range(len(list(ages.values()))))
avg_index = math.ceil(np.dot(counts/sum(counts), indices))
mortality_results.update({names_dict[name]: age_avgs[age_list[avg_index]]})
print('Average age of death from these mortalities:')
for key, val in mortality_results.items():
print(key.decode('utf-8'), ' - ', val, sep='')
return mortality_results
else:
counts = np.array(list(total_deaths_all.values()))
indices = list(range(len(list(total_deaths_all.values()))))
avg_index = math.ceil(np.dot(counts/sum(counts), indices))
avg_age = age_avgs[age_list[avg_index]]
print('Average age of death: ', avg_age, '\n', sep='')
return avg_age
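# Example (illustrative): overall average age of death vs. per-mortality averages.
# avg_age = age_of_death(matches, names, total_deaths_all, age_avgs, just_mortalities=False)
# per_mortality = age_of_death(matches, names, total_deaths_all, age_avgs, just_mortalities=True)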
def stacked_bar_chart(matches, names, total_deaths_all):
# ABOUT: Takes top 12 mortality data and creates a stacked bar chart of them.
# Creates the dictionary of mortality to death rate per 100,000.
percentage = {name: 0 for name in names}
for entry in matches:
current_mortality = entry[1]
if current_mortality in names:
deaths = entry[3]
percentage.update({current_mortality: (percentage[current_mortality] + deaths)})
names_path = r'C:\Users\Amy\Desktop\Research\data\070617_113_listofdeaths.txt'
names_data = np.genfromtxt(names_path,
dtype=(object, object),
delimiter='\t',
names=True)
names_dict = {row[0]: row[1] for row in names_data}
# Sums all the death rates then divides all individual rates by the sum to obtain each percentage of deaths.
for disease, deaths in percentage.items():
percentage.update({disease: int(round(deaths/sum(total_deaths_all.values())*100))})
clean_percentage = {}
for disease, deaths in percentage.items():
new_key = names_dict[disease].decode('utf-8')
clean_percentage.update({new_key: deaths})
# Creates the stacked bar chart.
df = | pd.Series(clean_percentage, name=' ') | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = | algos.duplicated(case, keep=False) | pandas.core.algorithms.duplicated |
from xml.etree import ElementTree
from ..windows import BaseWindow
from ..utils.authorization import Authorization
#from ..utils.resources import Resources
from urllib.parse import urlparse
from base64 import b16encode, b64encode
from esppy.espapi.eventsources import EventSources
import pandas as pd
import esppy.espapi.tools as tools
import esppy.espapi.codec as codec
import threading
import logging
import esppy
from esppy.websocket import createWebSocket
import json
import time
import ssl
import six
import re
import os
class Connection(tools.Options):
def __init__(self,esp,**kwargs):
tools.Options.__init__(self,**kwargs)
self._esp = esp
url = urlparse(self._esp.session.conn_url)
self._secure = False
if url[0] == "https":
self._secure = True
s = url[1].split(":")
self._host = s[0]
if len(s) == 2:
self._port = s[1]
elif self._secure:
self._port = 443
else:
self._port = 80
self._path = url[2]
self._websocket = None
self._handshakeComplete = False
self._headers = None
self._authorization = None
def start(self,readyCb = None):
if (self.isConnected):
return
self.clear()
url = self.getUrl()
if (url == None):
raise Exception("invalid url")
headers = []
auth = Authorization.getInstance(self._esp.session)
ws4py = (self.getOpt("websockets","") != "websocket_client")
if auth.isEnabled:
headers.append(("Authorization",auth.authorization))
ws = createWebSocket(url,self._esp.session,on_message=self.on_message,on_data=self.on_data,on_error=self.on_error,on_open=self.on_open,on_close=self.on_close,headers=headers,ws4py=ws4py)
ws.connect()
def stop(self):
if (self.isConnected):
self.clear()
return(True)
return(False)
def restart(self):
self.clear()
self.start()
def send(self,data):
if self._websocket != None:
self._websocket.send(str(data))
def sendBinary(self,o):
if self._websocket != None:
encoder = codec.JsonEncoder(o)
self._websocket.sendBinary(encoder.data)
def getUrl(self):
return(None)
def message(self,message):
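        # Handshake response parser: accumulates "name: value" header lines
        # until an empty line, stores them in self._headers, then reacts to
        # the "status" header (200 = handshake complete, 401 = send or
        # request authorization).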
if (self.isHandshakeComplete):
return
name = ""
value = None
for i in range(0,len(message)):
c = message[i]
if (c == '\n'):
if (len(name) == 0):
break
if (self._headers == None):
self._headers = {}
if (value != None):
self._headers[name] = value.strip()
else:
self._headers[name] = ""
name = ""
value = None
elif (value != None):
value += c
elif (c == ':'):
value = ""
else:
name += c
status = self.getHeader("status")
if (status != None):
value = int(status)
if (value == 200):
self._handshakeComplete = True
self.handshakeComplete()
if (tools.supports(self._delegate,"connected")):
self._delegate.connected(self)
elif (value == 401):
if (self._authorization != None):
self._websocket.send(self._authorization)
elif (tools.supports(self._delegate,"authenticate")):
scheme = self.getHeader("www-authenticate")
self._delegate.authenticate(self,scheme)
def close(self):
pass
def error(self):
pass
def closed(self):
pass
def on_open(self,ws):
self._websocket = ws
def on_close(self,ws,code,reason):
self.clear()
self.closed()
def on_error(self,ws,error):
self.clear()
self.error()
def on_message(self,ws,message):
self.message(message)
def on_data(self,ws,data):
self.data(data)
def setAuthorization(self,value):
self._authorization = value
if self.isConnected and self.isHandshakeComplete == False:
self._websocket.send(self._authorization)
def clear(self):
if (self._websocket != None):
self._websocket.close()
self._websocket = None
self._handshakeComplete = False
self._headers = None
def getHeader(self,name):
value = None
if (self._headers != None):
if name in self._headers:
value = self._headers[name]
return(value)
def getHost(self):
return(self._host)
def getPort(self):
return(self._port)
def getProtocol(self):
if self._secure:
return("wss")
else:
return("ws")
def getHttpProtocol(self):
if self._secure:
return("https")
else:
return("http")
def isSecure(self):
return(self._secure)
def handshakeComplete(self):
pass
@property
def isConnected(self):
return(self._websocket != None)
@property
def isHandshakeComplete(self):
return(self._handshakeComplete)
class ServerConnection(Connection):
_windowClasses = {
"source":"input",
"filter":"transformation",
"aggregate":"transformation",
"compute":"transformation",
"union":"transformation",
"join":"transformation",
"copy":"transformation",
"functional":"transformation",
"notification":"utility",
"pattern":"utility",
"counter":"utility",
"geofence":"utility",
"procedural":"utility",
"model-supervisor":"analytics",
"model-reader":"analytics",
"train":"analytics",
"calculate":"analytics",
"score":"analytics",
"text-context":"textanalytics",
"text-category":"textanalytics",
"text-sentiment":"textanalytics",
"text-topic":"textanalytics"
}
def __init__(self,esp,delegate,**kwargs):
Connection.__init__(self,esp,**kwargs)
self._esp = esp
self._delegates = []
if delegate != None:
tools.addTo(self._delegates,delegate)
self._delegate = delegate
self._datasources = {}
self._publishers = {}
self._stats = Stats(self)
self._log = Log(self)
self._modelDelegates = {}
self._urlPublishers = {}
self._autoReconnect = True
self._version = 6.2
def addDelegate(self,delegate):
tools.addTo(self._delegates,delegate)
if self.isHandshakeComplete and tools.supports(delegate,"ready"):
delegate.ready(self)
def removeDelegate(self,delegate):
tools.removeFrom(self._delegates,delegate)
def message(self,message):
if self.getOpt("debug",False):
logging.info(message)
if self.isHandshakeComplete == False:
Connection.message(self,message)
return
xml = None
o = None
for c in message:
if c == '{' or c == '[':
o = json.loads(str(message))
break
elif c == '<':
xml = ElementTree.fromstring(str(message))
break
if o != None:
self.processJson(o)
elif xml != None:
self.processXml(xml)
def data(self,data):
decoder = codec.JsonDecoder(data)
if decoder.data != None:
if self.getOpt("debug",False):
logging.info(decoder.data)
self.processJson(decoder.data)
def processXml(self,xml):
if xml.tag == "events" or xml.tag == "schema" or xml.tag == "info":
datasource = None
if "id" in xml.attrib:
id = xml.get("id")
if id in self._datasources:
datasource = self._datasources[id]
if datasource != None:
if xml.tag == "events":
datasource.eventsXml(xml)
elif xml.tag == "schema":
datasource.setSchemaFromXml(xml)
elif xml.tag == "info":
datasource.info(xml)
elif xml.tag == "stats":
self._stats.process(xml)
elif xml.tag == "log":
self._log.process(xml)
elif (xml.tag == "model"):
if "id" in xml.attrib:
id = xml.get("id")
if id in self._modelDelegates:
delegate = self._modelDelegates[id]
delegate.deliver(xml)
del self._modelDelegates[id]
elif (xml.tag == "url-publisher"):
if "id" in xml.attrib:
id = xml.get("id")
if id in self._urlPublishers:
publisher = self._urlPublishers[id]
if "complete" in xml.attrib:
complete = xml.get("complete")
publisher["complete"] = (complete == "true")
del self._urlPublishers[id]
else:
logging.info("GOT MSG: " + str(xml))
def processJson(self,json):
if "events" in json:
o = json["events"]
if "@id" in o:
id = o["@id"]
if id in self._datasources:
self._datasources[id].events(o)
elif "info" in json:
o = json["info"]
if "@id" in o:
id = o["@id"]
if id in self._datasources:
self._datasources[id].deliverInfo(o)
elif "schema" in json:
o = json["schema"]
id = o["@id"]
if id in self._datasources:
self._datasources[id].setSchemaFromJson(o)
elif id in self._publishers:
self._publishers[id].setSchemaFromJson(o)
def getUrl(self):
url = ""
url += self.getProtocol()
url += "://"
url += self.getHost()
url += ":"
url += str(self.getPort())
if self._path != None:
url += self._path
else:
url += "/"
url += "eventStreamProcessing/v1/connect"
if self._esp.accessToken is not None:
url += "?access_token=" + self._esp.accessToken
return(url)
def getEventCollection(self,path,**kwargs):
ec = EventCollection(self,path,**kwargs)
self._datasources[ec._id] = ec
if self.isHandshakeComplete:
ec.open()
return(ec)
def getEventStream(self,path,**kwargs):
es = EventStream(self,path,**kwargs)
self._datasources[es._id] = es
if self.isHandshakeComplete:
es.open()
return(es)
def getPublisher(self,path,**kwargs):
publisher = Publisher(self,path,**kwargs)
self._publishers[publisher._id] = publisher
if self.isHandshakeComplete:
publisher.open()
return(publisher)
def publishUrl(self,path,url,**kwargs):
opts = tools.Options(**kwargs)
blocksize = opts.getOpt("blocksize",1)
wait = opts.getOpt("wait",False)
id = tools.guid()
json = {"url-publisher":{}}
o = json["url-publisher"]
o["id"] = id
o["window"] = path
o["url"] = url
o["blocksize"] = blocksize
publisher = {"complete":False}
self._urlPublishers[id] = publisher
self.send(json)
if wait:
while publisher["complete"] == False:
time.sleep(1)
def publishDataFrame(self,path,df,**kwargs):
opts = tools.Options(**kwargs)
size = opts.getOpt("size",100)
blocksize = opts.getOpt("blocksize",1)
id = tools.guid()
json = {"publisher":{}}
request = json["publisher"]
request["id"] = id
request["action"] = "set"
request["window"] = path
self.send(json)
request["action"] = "publish"
data = []
for index, row in df.iterrows():
o = {}
for col in df.columns:
o[col] = row[col]
data.append(o)
if len(data) == size:
request["data"] = data
self.send(json)
data = []
if len(data) > 0:
request["data"] = data
            self.send(json)
request["data"] = None
request["action"] = "delete"
self.send(json)
def publishList(self,path,l,**kwargs):
opts = tools.Options(**kwargs)
size = opts.getOpt("size",100)
blocksize = opts.getOpt("blocksize",1)
id = tools.guid()
json = {"publisher":{}}
request = json["publisher"]
request["id"] = id
request["action"] = "set"
request["window"] = path
self.send(json)
request["action"] = "publish"
data = []
for i in l:
o = {}
for key,value in i.items():
o[key] = i[key]
data.append(o)
if len(data) == size:
request["data"] = data
self.send(json)
data = []
if len(data) > 0:
request["data"] = data
self.send(json)
request["data"] = None
request["action"] = "delete"
self.send(json)
def getStats(self):
return(self._stats)
def getLog(self):
return(self._log)
def loadModel(self,delegate):
if tools.supports(delegate,"modelLoaded") == False:
raise Exception("The model delegate must implement the modelLoaded method")
id = tools.guid()
self._modelDelegates[id] = ModelDelegate(self,delegate)
json = {"model":{}}
o = json["model"]
o["id"] = id
o["schema"] = True
o["index"] = True
o["xml"] = True
self.send(json)
def loadProjectFromFile(self,name,filename,**kwargs):
with open(filename) as f:
contents = f.read().encode("utf-8")
data = b64encode(contents)
json = {"project":{}}
o = json["project"]
o["id"] = tools.guid()
o["name"] = name
o["action"] = "load"
opts = tools.Options(**kwargs)
parms = {}
for k,v in opts.items():
parms[k] = str(v)
o["parms"] = parms
o["data"] = data.decode("utf-8")
self.send(json)
def loadRouterFromFile(self,name,filename,**kwargs):
with open(filename) as f:
contents = f.read().encode("utf-8")
data = b64encode(contents)
json = {"router":{}}
o = json["router"]
o["id"] = tools.guid()
o["name"] = name
o["action"] = "load"
opts = tools.Options(**kwargs)
parms = {}
for k,v in opts.items():
parms[k] = str(v)
o["parms"] = parms
o["data"] = data.decode("utf-8")
self.send(json)
def handshakeComplete(self):
version = self.getHeader("version")
if version == None:
self.version = 6.2
else:
self.version = version
for c in self._datasources.values():
c.open()
for p in self._publishers.values():
p.open()
if len(self._log._delegates) > 0:
self._log.start()
if len(self._stats._delegates) > 0:
self._stats.set()
for delegate in self._delegates:
if tools.supports(delegate,"connected"):
delegate.connected(self)
if tools.supports(delegate,"ready"):
delegate.ready(self)
def closed(self):
for c in self._datasources.values():
c.clear()
if tools.supports(self._delegate,"closed"):
self._delegate.closed(self)
if self._autoReconnect:
thread = threading.Thread(target = self.reconnect)
thread.daemon = True
thread.start()
def reconnect(self):
while self.isConnected == False:
#time.sleep(1)
time.sleep(5)
try:
self.start()
except:
pass
def createEventSources(self,delegate = None):
return(EventSources(self,delegate))
@property
def version(self):
return(self._version)
@version.setter
def version(self,value):
self._version = float(value)
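# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how the ServerConnection API above might be
# driven: open the connection, subscribe to a window, and publish a few
# events. The project/window paths and field names are invented, and error
# handling and delegates are omitted.
def _example_server_connection(esp):
    conn = ServerConnection(esp, None)
    conn.start()
    collection = conn.getEventCollection("project/query/window")  # hypothetical path
    conn.publishList("project/query/source",                      # hypothetical path
                     [{"id": 1, "value": 10.0}, {"id": 2, "value": 12.5}],
                     size=100)
    return collection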
class Datasource(tools.Options):
def __init__(self,connection,**kwargs):
tools.Options.__init__(self,**kwargs)
self._connection = connection
self._id = tools.guid()
self._fields = None
self._keyFields = None
self._schema = Schema()
self._delegates = []
self._paused = False
self._data = None
def setSchemaFromXml(self,xml):
self._schema.fromXml(xml)
for d in self._delegates:
if tools.supports(d,"schemaSet"):
d.schemaSet(self)
def setSchemaFromJson(self,json):
self._schema.fromJson(json)
for d in self._delegates:
if tools.supports(d,"schemaSet"):
d.schemaSet(self)
def setFilter(self,value):
self.setOpt("filter",value)
self.set()
def getFilter(self):
return(self.getOpt("filter",""))
def togglePlay(self):
code = False
if self._paused:
self.play()
code = True
else:
self.pause()
return(code)
def getFields(self):
fields = None
if self._schema != None:
fields = self._schema.getFields()
return(fields)
def getKeyFields(self):
fields = None
if self._schema != None:
fields = self._schema.getKeyFields()
return(fields)
def getKeyFieldNames(self):
names = []
fields = self.getKeyFields()
if fields != None:
for f in fields:
names.append(f["name"])
return(names)
def getColumnFields(self):
fields = None
if self._schema != None:
fields = self._schema.getColumnFields()
return(fields)
def getField(self,name):
field = None
if self._schema != None:
field = self._schema.getField(name)
return(field)
def getKey(self,o):
key = ""
for f in self._schema._keyFields:
name = f["name"]
if (name in o) == False:
break
if len(key) > 0:
key += "-"
key += str(o[name])
return(key)
def getData(self):
return(self._data)
def getList(self):
if isinstance(self._data,dict):
l = []
for k,v in self._data.items():
l.append(v)
return(l)
elif isinstance(self._data,list):
return(self._data)
def getValues(self,name):
f = self._schema.getField(name)
if f == None:
return(None)
values = []
if isinstance(self._data,dict):
for key,value in self._data.items():
if name in value:
if f["isNumber"]:
values.append(float(value[name]))
else:
values.append(value[name])
elif f["isNumber"]:
values.append(0.0)
else:
values.append("")
elif isinstance(self._data,list):
for value in self._data:
if name in value:
if f["isNumber"]:
values.append(float(value[name]))
else:
values.append(value[name])
elif f["isNumber"]:
values.append(0.0)
else:
values.append("")
return(values)
def getValuesBy(self,keys,names,delimiter = "."):
keyFields = []
for s in keys:
f = self._schema.getField(s)
if f == None:
raise Exception("field " + s + " not found")
keyFields.append(f)
timeKeys = False
if len(keyFields) == 1:
if keyFields[0]["isDate"]:
timeKeys = True
elif keyFields[0]["isTime"]:
timeKeys = True
valueFields = []
for s in names:
f = self._schema.getField(s)
if f == None:
raise Exception("field " + s + " not found")
valueFields.append(f)
items = None
if isinstance(self._data,dict):
items = self._data.values()
elif isinstance(self._data,list):
items = self._data
if items == None:
raise Exception("invalid data")
data = {}
for o in items:
key = ""
for f in keyFields:
name = f["name"]
if name in o:
if len(key) > 0:
key += delimiter
key += str(o[name])
if key in data:
entry = data[key]
else:
entry = {}
for f in valueFields:
name = f["name"]
entry[name] = 0.0
data[key] = entry
for f in valueFields:
if f["isNumber"]:
name = f["name"]
entry[name] += float(o[name])
keyValues = []
values = {}
for f in valueFields:
name = f["name"]
values[name] = []
for k,v in data.items():
if timeKeys:
dt = | pd.to_datetime(k,unit="us") | pandas.to_datetime |
import numpy as np
import pandas as pd
import yaml
from tqdm import tqdm
import logging
import math
import random
import argparse
import collections
import sys
from pathlib import Path
import os
import copy
import re
from lib.constants import *
from lib.TSP import TSP
from lib.TSPObjective import TSPObjective
from lib.pheromony_policies import AntSystem, ElitismAntSystem
from lib.Ant import Ant
from lib.SelectionPolicy import SelectionPolicy
import lib.utils as utils
class AntColony:
def __init__(self, initial_pheromone, ants_rate, num_iterations, instance_name, eid,
pheromony_policy, pheromony_kwargs,
selection_policy_kwargs):
self.initial_pheromone = initial_pheromone
self.ants_rate = ants_rate
self.num_iterations = num_iterations
self.instance_name = instance_name
self.eid = eid
self.pheromony_policy = pheromony_policy
self.pheromony_kwargs = pheromony_kwargs
self.selection_policy_kwargs = selection_policy_kwargs
def run(self, distances_suffix, solution_suffix):
tsp = TSP()
tsp.load(DIRS['INPUT']+self.instance_name, distances_suffix, solution_suffix)
best_ant = None
objective = TSPObjective(tsp)
pheromones = np.ones(tsp.distances.shape,dtype=float)*self.initial_pheromone
num_vertexes = len(tsp.distances)
num_ants = int(self.ants_rate * num_vertexes)
population = [None]*num_ants
selection_policy = SelectionPolicy(costs=tsp.distances,
pheromones=pheromones,
**self.selection_policy_kwargs)
pheromony_policy = eval(self.pheromony_policy)(**self.pheromony_kwargs)
columns = ['#Generation','Best fitness global','Best fitness','Mean fitness', 'Median fitness', 'Worst fitness']
df = pd.DataFrame([],columns = columns)
df = df.set_index(columns[0])
for i in range(num_ants):
population[i] = Ant()
# count = 0
logger = logging.getLogger('default')
if logger.level <= logging.INFO:
progress_bar = tqdm
else:
progress_bar = lambda x: x
for i in progress_bar(range(1,self.num_iterations+1)):
for ant in population:
ant.set_start(int(random.uniform(0,num_vertexes-1)))
# ant.set_start(count%num_vertexes)
# count+=1
for ant in population:
selection_policy.select_path(ant, num_vertexes, objective)
pheromony_policy.update(population, pheromones)
solution_values = [ant.solution_value for ant in population]
best_local_ant = population[np.argmin(solution_values)]
# for ant in population:
# print(ant.solution)
if best_ant == None or best_local_ant.solution_value < best_ant.solution_value:
best_ant = copy.copy(best_local_ant)
# print(best_ant.solution,best_ant.solution_value)
# print(pheromones)
            df.loc[i] = [f'{best_ant.solution_value:.4f}',
                         f'{np.min(solution_values):.4f}',
                         f'{np.mean(solution_values):.4f}',
                         f'{np.median(solution_values):.4f}',
                         f'{np.max(solution_values):.4f}']
logger.info(f"\n{df}")
self.save_results(df)
def __str__(self):
string=""
for k, v in self.__dict__.items():
string+=f"{k} = {v}\n"
return string
# @staticmethod
# def get_parameters_name(parameters):
# return f"{DIRS['RESULTS']}"+utils.get_parameters_name(parameters_dirs=3)+".json"
def get_name(self):
name = f"{DIRS['RESULTS']}"+utils.get_parameters_name(self.__dict__,num_dirs=3)+".json"
l = name.split('/')
for i in range(2,len(l)):
directory = '/'.join(l[:i])
logger = logging.getLogger('default')
logger.debug(directory)
Path(directory).mkdir(parents=True, exist_ok=True)
return name
def save_results(self, df):
f = open(self.get_name(),'w')
f.write(df.to_json(orient='records',lines=False))
f.close()
def load_results(self):
string = self.get_name()
# string = re.sub(r'{',r'\{',string)
# string = re.sub(r'}',r'\}',string)
# print(string)
return | pd.read_json(string) | pandas.read_json |
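# --- Usage sketch (not part of the original module) ---
# A minimal illustration of constructing and running the AntColony defined
# above. Every argument value here is invented (including the keyword names
# inside pheromony_kwargs and selection_policy_kwargs), and the instance
# files under DIRS['INPUT'] must exist for run() to do anything.
if __name__ == "__main__":
    colony = AntColony(initial_pheromone=0.1,
                       ants_rate=1.0,
                       num_iterations=50,
                       instance_name="berlin52",
                       eid=1,
                       pheromony_policy="AntSystem",
                       pheromony_kwargs={"evaporation_rate": 0.5},
                       selection_policy_kwargs={"alpha": 1.0, "beta": 2.0})
    colony.run(distances_suffix=".tsp", solution_suffix=".opt.tour")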
import pandas as pd
import pickle as pkl
from glob import glob
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score
inner_fold = 5
label_file = "/mnt/data3/pnaylor/CellularHeatmaps/outputs/label_nature.csv"
y_interest = "Residual"
def get_options():
import argparse
parser = argparse.ArgumentParser(
description='Creating training on heatmaps')
parser.add_argument('--labels', required=False,
default="/Users/naylorpeter/tmp/predict_from_umap_cell/patients/multi_class.csv",
metavar="str", type=str,
help='path to label csv file')
parser.add_argument('--y_interest', required=False,
default="RCB_class",
metavar="str", type=str,
help='tag for the variable of interest in labels')
parser.add_argument('--inner_fold', required=False,
default=5,
metavar="int", type=int,
help='number of inner folds to perform')
parser.add_argument('--filename', required=False,
default="results.csv",
metavar="str", type=str,
help='file name for val and test')
args = parser.parse_args()
return args
def main():
options = get_options()
inner_fold = options.inner_fold
label_file = options.labels
y_interest = options.y_interest
label = pd.read_csv(label_file, index_col="Biopsy")[y_interest]
list_dic = []
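    # For each result pickle: join every inner fold's validation/test
    # probabilities with the labels, compute per-fold ROC AUC, then keep the
    # test AUC of the fold whose validation AUC is best.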
for f in glob("*.pkl"):
dic = pkl.load(open(f, 'rb'))
validation_predictions = [dic["{}_validation_prob".format(i)].join(label) for i in range(inner_fold)]
test_predictions = [dic["{}_test_prob".format(i)].join(label) for i in range(inner_fold)]
auc_scores = []
auc_scores_t = []
for i in range(inner_fold):
y_scores = validation_predictions[i][1]
y_true = validation_predictions[i][y_interest]
auc_scores.append(roc_auc_score(y_true, y_scores))
y_scores_t = test_predictions[i][1]
y_true_t = test_predictions[i][y_interest]
auc_scores_t.append(roc_auc_score(y_true_t, y_scores_t))
best_ind = np.argmax(auc_scores)
auc_score_best_val = auc_scores[best_ind]
auc_score_best_val_t = auc_scores_t[best_ind]
validation_predictions = | pd.concat(validation_predictions, axis=0) | pandas.concat |
from __future__ import print_function, absolute_import, unicode_literals, division
import glob
import itertools
import json
import os
from collections import OrderedDict
import pandas as pd
import numpy as np
# from amt.settings import PATH_visible_not_visible_actions_csv
def robust_decode(bs):
    '''Takes a byte string as a parameter and converts it into a unicode one.
    First tries UTF-8, then falls back to Latin-1 if that fails.'''
cr = None
try:
cr = bs.decode('utf8')
except UnicodeDecodeError:
cr = bs.decode('latin1')
return cr
def clean_action(action):
    # normalize the action: decode bytes, drop non-alphabetic tokens, lowercase, and remove simple pronouns/filler words
if not action:
return action
action = robust_decode(action)
if "my vegetables in water instead of oil" in action:
action = "sauteing my vegetables in water instead of oil"
list_words = action.split(' ')
# remove tokens with numbers in them
action = [word for word in list_words if word.isalpha()]
action = ' '.join(action)
list_words = action.split(' ')
# remove I, you, she, he
list_words = [word.lower() for word in list_words]
action = [word for word in list_words if word not in ['you', 'just', 'i', 'I', 'she', 'he']]
action = ' '.join(action)
return action
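# --- Illustration (not part of the original module) ---
# A small sketch of what clean_action does to one raw value. The input is a
# byte string because robust_decode above expects bytes; the expected output
# follows from the rules above (drop non-alphabetic tokens, lowercase, remove
# pronouns/filler words).
def _example_clean_action():
    raw = b"I am just sauteing 2 vegetables"
    return clean_action(raw)  # expected: 'am sauteing vegetables'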
def clean_list_actions(list_actions):
new_list_actions = []
for action in list_actions:
new_list_actions.append(clean_action(action))
return new_list_actions
def clean_actions_file(clean_visible, clean_not_visible, path_visible_not_visible_actions_csv):
df = pd.read_csv(path_visible_not_visible_actions_csv)
df.loc[df["Visible Actions"].isnull(), "Visible Actions"] = ""
list_visible = df["Visible Actions"].values.tolist()
if clean_visible:
cleaned_list_visibile = clean_list_actions(list_visible)
else:
cleaned_list_visibile = list_visible
df.loc[df["Not Visible Actions"].isnull(), "Not Visible Actions"] = ""
list_not_visible = df["Not Visible Actions"].values.tolist()
if clean_not_visible:
cleaned_list_not_visibile = clean_list_actions(list_not_visible)
else:
cleaned_list_not_visibile = list_not_visible
list_videos = df["Video_name"].values.tolist()
# create new df
dict = OrderedDict()
dict['Video_name'] = list_videos
dict['Visible Actions'] = cleaned_list_visibile
dict['Not Visible Actions'] = cleaned_list_not_visibile
df_cleaned = pd.DataFrame(dict)
df_cleaned = df_cleaned.replace(np.nan, '', regex=True)
path_new_file = "/".join(path_visible_not_visible_actions_csv.split("/")[:-1]) + "/new_clean_" + \
path_visible_not_visible_actions_csv.split("/")[-1]
df_cleaned.to_csv(path_new_file, index=False)
def clean_context_file():
path_context_csv = "/local/oignat/action_recognition_clean/data/Embeddings/context_embeddings.csv"
df = | pd.read_csv(path_context_csv) | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for schemas."""
# pytype: skip-file
from __future__ import absolute_import
import typing
import unittest
import future.tests.base # pylint: disable=unused-import
import numpy as np
# patches unittest.testcase to be python3 compatible
import pandas as pd
from parameterized import parameterized
from past.builtins import unicode
import apache_beam as beam
from apache_beam.coders import RowCoder
from apache_beam.coders.typecoders import registry as coders_registry
from apache_beam.dataframe import schemas
from apache_beam.dataframe import transforms
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
Simple = typing.NamedTuple(
'Simple', [('name', unicode), ('id', int), ('height', float)])
coders_registry.register_coder(Simple, RowCoder)
Animal = typing.NamedTuple(
'Animal', [('animal', unicode), ('max_speed', typing.Optional[float])])
coders_registry.register_coder(Animal, RowCoder)
def matches_df(expected):
def check_df_pcoll_equal(actual):
actual = pd.concat(actual)
sorted_actual = actual.sort_values(by=list(actual.columns)).reset_index(
drop=True)
sorted_expected = expected.sort_values(
by=list(expected.columns)).reset_index(drop=True)
if not sorted_actual.equals(sorted_expected):
raise AssertionError(
'Dataframes not equal: \n\nActual:\n%s\n\nExpected:\n%s' %
(sorted_actual, sorted_expected))
return check_df_pcoll_equal
# Test data for all supported types that can be easily tested.
# Excludes bytes because it's difficult to create a series and dataframe bytes
# dtype. For example:
# pd.Series([b'abc'], dtype=bytes).dtype != 'S'
# pd.Series([b'abc'], dtype=bytes).astype(bytes).dtype == 'S'
COLUMNS = [
([375, 24, 0, 10, 16], np.int32, 'i32'),
([375, 24, 0, 10, 16], np.int64, 'i64'),
([375, 24, None, 10, 16], pd.Int32Dtype(), 'i32_nullable'),
([375, 24, None, 10, 16], pd.Int64Dtype(), 'i64_nullable'),
([375., 24., None, 10., 16.], np.float64, 'f64'),
([375., 24., None, 10., 16.], np.float32, 'f32'),
([True, False, True, True, False], np.bool, 'bool'),
(['Falcon', 'Ostrich', None, 3.14, 0], np.object, 'any'),
([True, False, True, None, False], pd.BooleanDtype(), 'bool_nullable'),
(['Falcon', 'Ostrich', None, 'Aardvark', 'Elephant'],
pd.StringDtype(),
'strdtype'),
] # type: typing.List[typing.Tuple[typing.List[typing.Any], typing.Any, str]]
NICE_TYPES_DF = pd.DataFrame(columns=[name for _, _, name in COLUMNS])
for arr, dtype, name in COLUMNS:
NICE_TYPES_DF[name] = pd.Series(arr, dtype=dtype, name=name).astype(dtype)
NICE_TYPES_PROXY = NICE_TYPES_DF[:0]
SERIES_TESTS = [(pd.Series(arr, dtype=dtype, name=name), arr) for arr,
dtype,
name in COLUMNS]
_TEST_ARRAYS = [
arr for arr, _, _ in COLUMNS
] # type: typing.List[typing.List[typing.Any]]
DF_RESULT = list(zip(*_TEST_ARRAYS))
INDEX_DF_TESTS = [
(NICE_TYPES_DF.set_index([name for _, _, name in COLUMNS[:i]]), DF_RESULT)
for i in range(1, len(COLUMNS) + 1)
]
NOINDEX_DF_TESTS = [(NICE_TYPES_DF, DF_RESULT)]
PD_VERSION = tuple(int(n) for n in pd.__version__.split('.'))
class SchemasTest(unittest.TestCase):
def test_simple_df(self):
expected = pd.DataFrame({
'name': list(unicode(i) for i in range(5)),
'id': list(range(5)),
'height': list(float(i) for i in range(5))
},
columns=['name', 'id', 'height'])
with TestPipeline() as p:
res = (
p
| beam.Create([
Simple(name=unicode(i), id=i, height=float(i)) for i in range(5)
])
| schemas.BatchRowsAsDataFrame(min_batch_size=10, max_batch_size=10))
assert_that(res, matches_df(expected))
def test_generate_proxy(self):
expected = pd.DataFrame({
'animal': pd.Series(dtype=pd.StringDtype()),
'max_speed': | pd.Series(dtype=np.float64) | pandas.Series |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = | SparseArray([np.nan, 1, 2, np.nan]) | pandas.core.sparse.api.SparseArray |
from datetime import timedelta,datetime
from processor.processor import Processor as p
import pandas as pd
from tqdm import tqdm
class Backtester(object):
def __init__(self,strat):
self.strat = strat
def equity_timeseries_backtest(self,start_date,end_date,seats):
trades = []
sim = self.strat.create_sim()
sim = p.column_date_processing(sim)
params = self.strat.trading_params
blacklist = []
phase = "base"
if not params["value"]:
sim["delta"] = sim["delta"] * -1
for seat in tqdm(range(seats),desc="backtesting_seats"):
date = start_date
while date < end_date:
try:
if date.weekday() > 4:
date = date + timedelta(days=1)
else:
phase = "blacklist"
if len(blacklist) > 0:
bl = | pd.DataFrame(blacklist) | pandas.DataFrame |
from warnings import catch_warnings, simplefilter
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import (
DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna)
from pandas.util import testing as tm
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexSetItem(object):
def test_setitem_multiindex(self):
with catch_warnings(record=True):
for index_fn in ('ix', 'loc'):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=assert_equal)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=assert_equal)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=assert_equal,
expected=3, )
# GH5206
df = DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = MultiIndex.from_product([
['A', 'B', 'C'],
date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = MultiIndex.from_product([
['foo', 'bar'],
date_range('2016-01-01', '2016-02-01', freq='MS')])
df = DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = MultiIndex.from_tuples(
[('A', Timestamp('2015-01-01')),
('A', Timestamp('2015-02-01'))])
subcols = MultiIndex.from_tuples(
[('foo', Timestamp('2016-01-01')),
('foo', Timestamp('2016-02-01'))])
vals = DataFrame(np.random.random((2, 2)),
index=subidx, columns=subcols)
check(target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal, )
# set all columns
vals = DataFrame(
np.random.random((2, 4)), index=subidx, columns=cols)
check(target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal, )
# identity
copy = df.copy()
check(target=df, indexers=(df.index, df.columns), value=df,
compare_fn=tm.assert_frame_equal, expected=copy)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = DataFrame(np.random.randn(6, 3), index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']] * 2
df = df_orig.copy()
df.loc[['bar']] *= 2
tm.assert_frame_equal(df.loc[['bar']], expected)
# raise because these have differing levels
with pytest.raises(TypeError):
df.loc['bar'] *= 2
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0
}})
df_orig.index = MultiIndex.from_tuples(df_orig.index,
names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], 'price'] *= 2
tm.assert_frame_equal(df, expected)
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df['d'] = np.nan
arr = np.array([0., 1.])
with catch_warnings(record=True):
df.ix[4, 'd'] = arr
tm.assert_series_equal(df.ix[4, 'd'],
Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
with catch_warnings(record=True):
df.ix[4, 'c'] = arr
exp = Series(arr, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
with catch_warnings(record=True):
df.ix[4, 'c'] = 10
exp = Series(10, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
with pytest.raises(ValueError):
with catch_warnings(record=True):
df.ix[4, 'c'] = [0, 1, 2, 3]
with pytest.raises(ValueError):
with catch_warnings(record=True):
df.ix[4, 'c'] = [0]
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A' + num for num in
map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]),
name=df2.index.values[0]).reindex(f_index)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
with catch_warnings(record=True):
df.ix[name, 'new_col'] = new_vals
def test_series_setitem(
self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
s = ymd['A']
s[2000, 3] = np.nan
assert isna(s.values[42:65]).all()
assert notna(s.values[:42]).all()
assert notna(s.values[65:]).all()
s[2000, 3, 10] = np.nan
assert isna(s[49])
def test_frame_getitem_setitem_boolean(
self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
tm.assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
with pytest.raises(TypeError, match='boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
codes = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(codes=codes, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.loc[:, 'value']
tm.assert_series_equal(df['value'], result)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[:, 'value']
tm.assert_series_equal(df['value'], result)
result = df.loc[df.index[1:3], 'value']
tm.assert_series_equal(df['value'][1:3], result)
result = df.loc[:, :]
tm.assert_frame_equal(df, result)
result = df
df.loc[:, 'value'] = 10
result['value'] = 10
tm.assert_frame_equal(df, result)
df.loc[:, :] = 10
        tm.assert_frame_equal(df, result)
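        # Added sketch (not part of the original test): partial assignment through
        # pd.IndexSlice on a MultiIndex axis, mirroring the .loc patterns above.
        midx2 = MultiIndex.from_product([['a', 'b'], [1, 2]], names=['k1', 'k2'])
        df2 = DataFrame({'value': [1., 2., 3., 4.]}, index=midx2)
        df2.loc[pd.IndexSlice[:, 2], 'value'] = 0.
        assert df2.loc[('a', 2), 'value'] == 0.
        assert df2.loc[('b', 1), 'value'] == 3.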
###############################################################################
# Building the Model
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.externals import joblib
# import pickle
# opening the databases
train_df = pd.read_csv('data/train_data_modified.csv')
# <NAME> (Ausar Geophysical)
# 2017/01/31
import numpy as np
import scipy.signal
import pandas as pd
from sklearn import preprocessing, metrics
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.base import clone
from matplotlib import pyplot as plt
import scipy.optimize
from scipy.optimize import lsq_linear
import fastdtw
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import lsqr
from scipy.signal import medfilt, gaussian
import xgboost as xgb
from xgboost.sklearn import XGBClassifier, XGBRegressor
eps = 1e-5
def load_data():
train_data = pd.read_csv('../facies_vectors.csv');
train_data = train_data[train_data['Well Name'] != 'Recruit F9'].reset_index(drop=True)
validation_data = pd.read_csv('../validation_data_nofacies.csv')
    return pd.concat([train_data, validation_data])
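# Added sketch (not from the original source): the concatenated frame can be split
# back into labeled training rows and unlabeled validation rows. This assumes the
# label column is named 'Facies' and is absent/NaN in validation_data_nofacies.csv.
def split_train_validation(all_data):
    is_labeled = all_data['Facies'].notnull()
    return all_data[is_labeled], all_data[~is_labeled]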
import pandas as pd
import numpy as np
import time
import datetime
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.core import Dropout
from keras.utils import to_categorical
from keras.regularizers import l2
from keras.models import load_model
class CompleteCode:
def __init__(self):
pass
def preprocess_data(self,csvfile):
"""Load csv file from the current directory
Should provide the fullname of file.
For example if test data is in file test.csv then argument should be passed
as test.csv
Returns: Pandas dataframe """
        data = pd.read_csv(csvfile)
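        # Added sketch (commented out; not the author's implementation): class labels
        # are commonly one-hot encoded for a softmax output, using the to_categorical
        # helper imported above. 'label' is an assumed column name.
        #
        #     labels = data['label'].values
        #     y = to_categorical(labels)        # infers num_classes from the data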
"""
๊ตญํ ๊ตํต๋ถ Open API
molit(Ministry of Land, Infrastructure and Transport)
1. Transaction ํด๋์ค: ๋ถ๋์ฐ ์ค๊ฑฐ๋๊ฐ ์กฐํ
- AptTrade: ์ํํธ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- AptTradeDetail: ์ํํธ๋งค๋งค ์ค๊ฑฐ๋ ์์ธ ์๋ฃ ์กฐํ
- AptRent: ์ํํธ ์ ์์ธ ์๋ฃ ์กฐํ
- AptOwnership: ์ํํธ ๋ถ์๊ถ์ ๋งค ์ ๊ณ ์๋ฃ ์กฐํ
- OffiTrade: ์คํผ์คํ
๋งค๋งค ์ ๊ณ ์กฐํ
- OffiRent: ์คํผ์คํ
์ ์์ธ ์ ๊ณ ์กฐํ
- RHTrade: ์ฐ๋ฆฝ๋ค์ธ๋ ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- RHRent: ์ฐ๋ฆฝ๋ค์ธ๋ ์ ์์ธ ์ค๊ฑฐ๋์๋ฃ ์กฐํ
- DHTrade: ๋จ๋
/๋ค๊ฐ๊ตฌ ๋งค๋งค ์ค๊ฑฐ๋ ์กฐํ
- DHRent: ๋จ๋
/๋ค๊ฐ๊ตฌ ์ ์์ธ ์๋ฃ ์กฐํ
- LandTrade: ํ ์ง ๋งค๋งค ์ ๊ณ ์กฐํ
- BizTrade: ์์
์
๋ฌด์ฉ ๋ถ๋์ฐ ๋งค๋งค ์ ๊ณ ์๋ฃ ์กฐํ
2. Building ํด๋์ค: ๊ฑด์ถ๋ฌผ๋์ฅ์ ๋ณด ์๋น์ค
01 ๊ฑด์ถ๋ฌผ๋์ฅ ๊ธฐ๋ณธ๊ฐ์ ์กฐํ: getBrBasisOulnInfo
02 ๊ฑด์ถ๋ฌผ๋์ฅ ์ด๊ดํ์ ๋ถ ์กฐํ: getBrRecapTitleInfo
03 ๊ฑด์ถ๋ฌผ๋์ฅ ํ์ ๋ถ ์กฐํ: getBrTitleInfo
04 ๊ฑด์ถ๋ฌผ๋์ฅ ์ธต๋ณ๊ฐ์ ์กฐํ: getBrFlrOulnInfo
05 ๊ฑด์ถ๋ฌผ๋์ฅ ๋ถ์์ง๋ฒ ์กฐํ: getBrAtchJibunInfo
06 ๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๊ณต์ฉ๋ฉด์ ์กฐํ: getBrExposPubuseAreaInfo
07 ๊ฑด์ถ๋ฌผ๋์ฅ ์ค์์ ํ์์ค ์กฐํ: getBrWclfInfo
08 ๊ฑด์ถ๋ฌผ๋์ฅ ์ฃผํ๊ฐ๊ฒฉ ์กฐํ: getBrHsprcInfo
09 ๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๋ถ ์กฐํ: getBrExposInfo
10 ๊ฑด์ถ๋ฌผ๋์ฅ ์ง์ญ์ง๊ตฌ๊ตฌ์ญ ์กฐํ: getBrJijiguInfo
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class Transaction:
"""
๋ถ๋์ฐ ์ค๊ฑฐ๋๊ฐ ์กฐํ ํด๋์ค
"""
def __init__(self, serviceKey):
"""
๊ณต๊ณต ๋ฐ์ดํฐ ํฌํธ์์ ๋ฐ๊ธ๋ฐ์ Service Key๋ฅผ ์
๋ ฅ๋ฐ์ ์ด๊ธฐํํฉ๋๋ค.
"""
# Open API ์๋น์ค ํค ์ด๊ธฐํ
self.serviceKey = serviceKey
# ServiceKey ์ ํจ์ฑ ๊ฒ์ฌ
self.urlAptTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey="
+ self.serviceKey)
self.urlAptTradeDetail = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTradeDev?serviceKey="
+ self.serviceKey)
self.urlAptRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptRent?serviceKey="
+ self.serviceKey)
self.urlAptOwnership = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSilvTrade?serviceKey="
+ self.serviceKey)
self.urlOffiTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiTrade?serviceKey="
+ self.serviceKey)
self.urlOffiRent = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiRent?serviceKey="
+ self.serviceKey)
self.urlRHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHTrade?serviceKey="
+ self.serviceKey)
self.urlRHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHRent?serviceKey="
+ self.serviceKey)
self.urlDHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHTrade?serviceKey="
+ self.serviceKey)
self.urlDHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHRent?serviceKey="
+ self.serviceKey)
self.urlLandTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcLandTrade?serviceKey="
+ self.serviceKey)
self.urlBizTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcNrgTrade?serviceKey="
+ self.serviceKey)
# Open API URL Dict
        urlDict = {
            "์ํํธ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ": self.urlAptTrade,
            "์ํํธ๋งค๋งค ์ค๊ฑฐ๋ ์์ธ ์๋ฃ ์กฐํ": self.urlAptTradeDetail,
            "์ํํธ ์ ์์ธ ์๋ฃ ์กฐํ": self.urlAptRent,
            "์ํํธ ๋ถ์๊ถ์ ๋งค ์ ๊ณ ์๋ฃ ์กฐํ": self.urlAptOwnership,
            "์คํผ์คํ
 ๋งค๋งค ์ ๊ณ ์กฐํ": self.urlOffiTrade,
            "์คํผ์คํ
 ์ ์์ธ ์ ๊ณ ์กฐํ": self.urlOffiRent,
            "์ฐ๋ฆฝ๋ค์ธ๋ ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ": self.urlRHTrade,
            "์ฐ๋ฆฝ๋ค์ธ๋ ์ ์์ธ ์ค๊ฑฐ๋์๋ฃ ์กฐํ": self.urlRHRent,
            "๋จ๋
/๋ค๊ฐ๊ตฌ ๋งค๋งค ์ค๊ฑฐ๋ ์กฐํ": self.urlDHTrade,
            "๋จ๋
/๋ค๊ฐ๊ตฌ ์ ์์ธ ์๋ฃ ์กฐํ": self.urlDHRent,
            "ํ ์ง ๋งค๋งค ์ ๊ณ ์กฐํ": self.urlLandTrade,
            "์์
์
๋ฌด์ฉ ๋ถ๋์ฐ ๋งค๋งค ์ ๊ณ ์๋ฃ ์กฐํ": self.urlBizTrade,
        }
# ์๋น์ค ์ ์ ์๋ ์ฌ๋ถ ํ์ธ
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} ์๋น์ค๊ฐ ์ ์ ์๋ํฉ๋๋ค.")
else:
print(f">>> {serviceName} ์๋น์คํค ๋ฏธ๋ฑ๋ก ์ค๋ฅ์
๋๋ค.")
# ์ง์ญ ์ฝ๋ ์ด๊ธฐํ
# ๋ฒ์ ๋ ์ฝ๋ ์ถ์ฒ : https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["ํ์ง์ฌ๋ถ"] == "์กด์ฌ"]
code["๋ฒ์ ๊ตฌ์ฝ๋"] = list(map(lambda a: str(a)[:5], list(code["๋ฒ์ ๋์ฝ๋"])))
self.code = code
def CodeFinder(self, name):
"""
๊ตญํ ๊ตํต๋ถ ์ค๊ฑฐ๋๊ฐ ์ ๋ณด ์คํAPI๋ ๋ฒ์ ๋์ฝ๋ 10์๋ฆฌ ์ค ์ 5์๋ฆฌ์ธ ๊ตฌ๋ฅผ ๋ํ๋ด๋ ์ง์ญ์ฝ๋๋ฅผ ์ฌ์ฉํฉ๋๋ค.
API์ ์ฌ์ฉํ ๊ตฌ ๋ณ ์ฝ๋๋ฅผ ์กฐํํ๋ ๋ฉ์๋์ด๋ฉฐ, ๋ฌธ์์ด ์ง์ญ ๋ช
์ ์
๋ ฅ๋ฐ๊ณ , ์กฐํ ๊ฒฐ๊ณผ๋ฅผ Pandas DataFrameํ์์ผ๋ก ์ถ๋ ฅํฉ๋๋ค.
"""
result = self.code[self.code["๋ฒ์ ๋๋ช
"].str.contains(name)][[
"๋ฒ์ ๋๋ช
", "๋ฒ์ ๊ตฌ์ฝ๋"
]]
result.index = range(len(result))
return result
def DataCollector(self, service, LAWD_CD, start_date, end_date):
"""
์๋น์ค๋ณ ๊ธฐ๊ฐ๋ณ ์กฐํ
์
๋ ฅ: ์๋น์ค๋ณ ์กฐํ ๋ฉ์๋, ์ง์ญ์ฝ๋, ์์์(YYYYmm), ์ข
๋ฃ์(YYYYmm)
"""
start_date = datetime.datetime.strptime(str(start_date), "%Y%m")
start_date = datetime.datetime.strftime(start_date, "%Y-%m")
end_date = datetime.datetime.strptime(str(end_date), "%Y%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq="m")
date_list = list(ts.strftime("%Y%m"))
df = pd.DataFrame()
df_sum = pd.DataFrame()
for m in date_list:
print(">>> LAWD_CD :", LAWD_CD, "DEAL_YMD :", m)
DEAL_YMD = m
df = service(LAWD_CD, DEAL_YMD)
df_sum = pd.concat([df_sum, df])
df_sum.index = range(len(df_sum))
return df_sum
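    # Added usage sketch (commented out; not from the original source). Assuming a
    # valid service key from data.go.kr, a typical call sequence looks like:
    #
    #     ts = Transaction(serviceKey="<your key>")
    #     codes = ts.CodeFinder("๊ฐ๋จ๊ตฌ")          # look up the 5-digit district code
    #     df = ts.AptTrade(LAWD_CD="11680", DEAL_YMD="202012")
    #     df_year = ts.DataCollector(ts.AptTrade, "11680", 202001, 202012)
    #
    # "11680" is used here only as an illustrative district code.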
def AptTrade(self, LAWD_CD, DEAL_YMD):
"""
01 ์ํํธ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
            variables = [
                "๋ฒ์ ๋",
                "์ง์ญ์ฝ๋",
                "์ํํธ",
                "์ง๋ฒ",
                "๋
",
                "์",
                "์ผ",
                "๊ฑด์ถ๋
๋",
                "์ ์ฉ๋ฉด์ ",
                "์ธต",
                "๊ฑฐ๋๊ธ์ก",
            ]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
                data = pd.DataFrame(
                    [[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ํํธ, ์ง๋ฒ, ๋
, ์, ์ผ, ๊ฑด์ถ๋
๋, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
                    columns=variables,
                )
df = pd.concat([df, data])
# Set Columns
            colNames = [
                "์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ", "์ ์ฉ๋ฉด์ ", "์ธต", "๊ฑด์ถ๋
๋", "๊ฑฐ๋๊ธ์ก"
            ]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df["์ํํธ"] = df["์ํํธ"].str.strip()
df.index = range(len(df))
# ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptTradeDetail(self, LAWD_CD, DEAL_YMD):
"""
02 ์ํํธ๋งค๋งค ์ค๊ฑฐ๋ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptTradeDetail + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๊ฑฐ๋๊ธ์ก",
"๊ฑด์ถ๋
๋",
"๋
",
"๋๋ก๋ช
",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋",
"๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์ง์์งํ์ฝ๋",
"๋๋ก๋ช
์ฝ๋",
"๋ฒ์ ๋",
"๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋",
"๋ฒ์ ๋๋ถ๋ฒ์ฝ๋",
"๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋",
"๋ฒ์ ๋์๋ฉด๋์ฝ๋",
"๋ฒ์ ๋์ง๋ฒ์ฝ๋",
"์ํํธ",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ง๋ฒ",
"์ง์ญ์ฝ๋",
"์ธต",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๊ฑฐ๋๊ธ์ก,
๊ฑด์ถ๋
๋,
๋
,
๋๋ก๋ช
,
๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋,
๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋,
๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋,
๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋,
๋๋ก๋ช
์ง์์งํ์ฝ๋,
๋๋ก๋ช
์ฝ๋,
๋ฒ์ ๋,
๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋,
๋ฒ์ ๋๋ถ๋ฒ์ฝ๋,
๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋,
๋ฒ์ ๋์๋ฉด๋์ฝ๋,
๋ฒ์ ๋์ง๋ฒ์ฝ๋,
์ํํธ,
์,
์ผ,
์ ์ฉ๋ฉด์ ,
์ง๋ฒ,
์ง์ญ์ฝ๋,
์ธต,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ํํธ",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑด์ถ๋
๋",
"๊ฑฐ๋๊ธ์ก",
"๋ฒ์ ๋๋ณธ๋ฒ์ฝ๋",
"๋ฒ์ ๋๋ถ๋ฒ์ฝ๋",
"๋ฒ์ ๋์๊ตฐ๊ตฌ์ฝ๋",
"๋ฒ์ ๋์๋ฉด๋์ฝ๋",
"๋ฒ์ ๋์ง๋ฒ์ฝ๋",
"๋๋ก๋ช
",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ณธ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
๊ฑด๋ฌผ๋ถ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์๊ตฐ๊ตฌ์ฝ๋",
"๋๋ก๋ช
์ผ๋ จ๋ฒํธ์ฝ๋",
"๋๋ก๋ช
์ง์์งํ์ฝ๋",
"๋๋ก๋ช
์ฝ๋",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df["์ํํธ"] = df["์ํํธ"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ํํธ", "์ง๋ฒ", "๋๋ก๋ช
"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptRent(self, LAWD_CD, DEAL_YMD):
"""
03 ์ํํธ ์ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ํํธ",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"๊ฑด์ถ๋
๋",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ํํธ, ์ง๋ฒ, ๋
, ์, ์ผ, ๊ฑด์ถ๋
๋, ์ ์ฉ๋ฉด์ , ์ธต, ๋ณด์ฆ๊ธ์ก, ์์ธ๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ํํธ",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑด์ถ๋
๋",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ง๋ฒ", "์ํํธ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def AptOwnership(self, LAWD_CD, DEAL_YMD):
"""
04 ์ํํธ ๋ถ์๊ถ์ ๋งค ์ ๊ณ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlAptOwnership + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๊ตฌ๋ถ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๊ตฌ๋ถ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๊ตฌ๋ถ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ", "๊ตฌ๋ถ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def OffiTrade(self, LAWD_CD, DEAL_YMD):
"""
05 ์คํผ์คํ
๋งค๋งค ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlOffiTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ", "์ ์ฉ๋ฉด์ ", "์ธต", "๊ฑฐ๋๊ธ์ก"
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def OffiRent(self, LAWD_CD, DEAL_YMD):
"""
06 ์คํผ์คํ
์ ์์ธ ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlOffiRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ",
"์์ธ",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ๋จ์ง, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ์ธต, ๋ณด์ฆ๊ธ, ์์ธ]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"๋จ์ง",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"์ธต",
"๋ณด์ฆ๊ธ",
"์์ธ",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ"] = pd.to_numeric(df["๋ณด์ฆ๊ธ"].str.replace(",", ""))
df["์์ธ"] = pd.to_numeric(df["์์ธ"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "๋จ์ง", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def RHTrade(self, LAWD_CD, DEAL_YMD):
"""
07 ์ฐ๋ฆฝ๋ค์ธ๋ ๋งค๋งค ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlRHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฐ๋ฆฝ๋ค์ธ๋, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ๊ฑด์ถ๋
๋, ์ธต, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฐ๋ฆฝ๋ค์ธ๋", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def RHRent(self, LAWD_CD, DEAL_YMD):
"""
08 ์ฐ๋ฆฝ๋ค์ธ๋ ์ ์์ธ ์ค๊ฑฐ๋์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlRHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"๋
",
"์",
"์ผ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฐ๋ฆฝ๋ค์ธ๋, ์ง๋ฒ, ๋
, ์, ์ผ, ์ ์ฉ๋ฉด์ , ๊ฑด์ถ๋
๋, ์ธต, ๋ณด์ฆ๊ธ์ก,
์์ธ๊ธ์ก
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์ฐ๋ฆฝ๋ค์ธ๋",
"์ง๋ฒ",
"์ ์ฉ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"์ธต",
"๋ณด์ฆ๊ธ์ก",
"์์ธ๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฐ๋ฆฝ๋ค์ธ๋", "์ง๋ฒ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def DHTrade(self, LAWD_CD, DEAL_YMD):
"""
09 ๋จ๋
/๋ค๊ฐ๊ตฌ ๋งค๋งค ์ค๊ฑฐ๋ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlDHTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์ฃผํ์ ํ",
"๋
",
"์",
"์ผ",
"๋์ง๋ฉด์ ",
"์ฐ๋ฉด์ ",
"๊ฑด์ถ๋
๋",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์ฃผํ์ ํ, ๋
, ์, ์ผ, ๋์ง๋ฉด์ , ์ฐ๋ฉด์ , ๊ฑด์ถ๋
๋, ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฃผํ์ ํ", "๋์ง๋ฉด์ ", "์ฐ๋ฉด์ ", "๊ฑด์ถ๋
๋", "๊ฑฐ๋๊ธ์ก"
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์ฃผํ์ ํ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def DHRent(self, LAWD_CD, DEAL_YMD):
"""
10 ๋จ๋
/๋ค๊ฐ๊ตฌ ์ ์์ธ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlDHRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = ["๋ฒ์ ๋", "์ง์ญ์ฝ๋", "๋
", "์", "์ผ", "๊ณ์ฝ๋ฉด์ ", "๋ณด์ฆ๊ธ์ก", "์์ธ๊ธ์ก"]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame([[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ๋
, ์, ์ผ, ๊ณ์ฝ๋ฉด์ , ๋ณด์ฆ๊ธ์ก, ์์ธ๊ธ์ก]],
columns=variables)
df = pd.concat([df, data])
# Set Columns
colNames = ["์ง์ญ์ฝ๋", "๋ฒ์ ๋", "๊ฑฐ๋์ผ", "๊ณ์ฝ๋ฉด์ ", "๋ณด์ฆ๊ธ์ก", "์์ธ๊ธ์ก"]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๋ณด์ฆ๊ธ์ก"] = pd.to_numeric(df["๋ณด์ฆ๊ธ์ก"].str.replace(",", ""))
df["์์ธ๊ธ์ก"] = pd.to_numeric(df["์์ธ๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def LandTrade(self, LAWD_CD, DEAL_YMD):
"""
11 ํ ์ง ๋งค๋งค ์ ๊ณ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlLandTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๋ฒ์ ๋",
"์ง์ญ์ฝ๋",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ง๋ชฉ",
"๋
",
"์",
"์ผ",
"์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ",
"๊ฑฐ๋๋ฉด์ ",
"๊ฑฐ๋๊ธ์ก",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[๋ฒ์ ๋, ์ง์ญ์ฝ๋, ์๊ตฐ๊ตฌ, ์ฉ๋์ง์ญ, ์ง๋ชฉ, ๋
, ์, ์ผ, ์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ, ๊ฑฐ๋๋ฉด์ , ๊ฑฐ๋๊ธ์ก]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ง๋ชฉ",
"์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ",
"๊ฑฐ๋๋ฉด์ ",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(
["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "์ฉ๋์ง์ญ", "์ง๋ชฉ", "์ง๋ถ๊ฑฐ๋๊ตฌ๋ถ"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def BizTrade(self, LAWD_CD, DEAL_YMD):
"""
12 ์์
์
๋ฌด์ฉ ๋ถ๋์ฐ ๋งค๋งค ์ ๊ณ ์๋ฃ ์กฐํ
์
๋ ฅ: ์ง์ญ์ฝ๋(๋ฒ์ ๋์ฝ๋ 5์๋ฆฌ), ๊ณ์ฝ์(YYYYmm)
"""
# URL
url_1 = self.urlBizTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"๊ฑฐ๋๊ธ์ก",
"๊ฑด๋ฌผ๋ฉด์ ",
"๊ฑด๋ฌผ์ฃผ์ฉ๋",
"๊ฑด์ถ๋
๋",
"๊ตฌ๋ถ",
"๋
",
"์",
"์ผ",
"๋์ง๋ฉด์ ",
"๋ฒ์ ๋",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ ํ",
"์ง์ญ์ฝ๋",
"์ธต",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
๊ฑฐ๋๊ธ์ก,
๊ฑด๋ฌผ๋ฉด์ ,
๊ฑด๋ฌผ์ฃผ์ฉ๋,
๊ฑด์ถ๋
๋,
๊ตฌ๋ถ,
๋
,
์,
์ผ,
๋์ง๋ฉด์ ,
๋ฒ์ ๋,
์๊ตฐ๊ตฌ,
์ฉ๋์ง์ญ,
์ ํ,
์ง์ญ์ฝ๋,
์ธต,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"์ง์ญ์ฝ๋",
"๋ฒ์ ๋",
"๊ฑฐ๋์ผ",
"์๊ตฐ๊ตฌ",
"์ฉ๋์ง์ญ",
"์ ํ",
"๋์ง๋ฉด์ ",
"๊ตฌ๋ถ",
"๊ฑด๋ฌผ๋ฉด์ ",
"๊ฑด๋ฌผ์ฃผ์ฉ๋",
"๊ฑด์ถ๋
๋",
"์ธต",
"๊ฑฐ๋๊ธ์ก",
]
# Feature Engineering
try:
if len(df["๋
"] != 0) & len(df["์"] != 0) & len(df["์ผ"] != 0):
df["๊ฑฐ๋์ผ"] = df["๋
"] + "-" + df["์"] + "-" + df["์ผ"]
df["๊ฑฐ๋์ผ"] = pd.to_datetime(df["๊ฑฐ๋์ผ"])
df["๊ฑฐ๋๊ธ์ก"] = pd.to_numeric(df["๊ฑฐ๋๊ธ์ก"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("์กฐํํ ์๋ฃ๊ฐ ์์ต๋๋ค.")
# Arange Columns
df = df[colNames]
df = df.sort_values(["๋ฒ์ ๋", "๊ฑฐ๋์ผ"])
df["๋ฒ์ ๋"] = df["๋ฒ์ ๋"].str.strip()
df.index = range(len(df))
# ์ซ์ํ ๋ณํ
cols = df.columns.drop(
["๋ฒ์ ๋", "๊ฑฐ๋์ผ", "์๊ตฐ๊ตฌ", "์ฉ๋์ง์ญ", "์ ํ", "๊ฑด๋ฌผ์ฃผ์ฉ๋"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
class Building:
"""
๊ฑด์ถ๋ฌผ๋์ฅ์ ๋ณด ์๋น์ค
"""
def __init__(self, serviceKey):
"""
๊ณต๊ณต ๋ฐ์ดํฐ ํฌํธ์์ ๋ฐ๊ธ๋ฐ์ Service Key๋ฅผ ์
๋ ฅ๋ฐ์ ์ด๊ธฐํํฉ๋๋ค.
"""
# Open API ์๋น์ค ํค ์ด๊ธฐํ
self.serviceKey = serviceKey
# ServiceKey ์ ํจ์ฑ ๊ฒ์ฌ
self.baseUrl = "http://apis.data.go.kr/1613000/BldRgstService_v2/"
self.url_getBrBasisOulnInfo = (self.baseUrl + "getBrBasisOulnInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrRecapTitleInfo = (self.baseUrl + "getBrRecapTitleInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrTitleInfo = (self.baseUrl + "getBrTitleInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrFlrOulnInfo = (self.baseUrl + "getBrFlrOulnInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrAtchJibunInfo = (self.baseUrl + "getBrAtchJibunInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrExposPubuseAreaInfo = (self.baseUrl +
"getBrExposPubuseAreaInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrWclfInfo = (self.baseUrl + "getBrWclfInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrHsprcInfo = (self.baseUrl + "getBrHsprcInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrExposInfo = (self.baseUrl + "getBrExposInfo" +
f"?serviceKey={self.serviceKey}")
self.url_getBrJijiguInfo = (self.baseUrl + "getBrJijiguInfo" +
f"?serviceKey={self.serviceKey}")
# Open API URL Dict
urlDict = {
"๊ฑด์ถ๋ฌผ๋์ฅ ๊ธฐ๋ณธ๊ฐ์ ์กฐํ": self.url_getBrBasisOulnInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ด๊ดํ์ ๋ถ ์กฐํ": self.url_getBrRecapTitleInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ํ์ ๋ถ ์กฐํ": self.url_getBrTitleInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ธต๋ณ๊ฐ์ ์กฐํ": self.url_getBrFlrOulnInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ๋ถ์์ง๋ฒ ์กฐํ": self.url_getBrAtchJibunInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๊ณต์ฉ๋ฉด์ ์กฐํ": self.url_getBrExposPubuseAreaInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ค์์ ํ์์ค ์กฐํ": self.url_getBrWclfInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ฃผํ๊ฐ๊ฒฉ ์กฐํ": self.url_getBrHsprcInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ ์ ๋ถ ์กฐํ": self.url_getBrExposInfo,
"๊ฑด์ถ๋ฌผ๋์ฅ ์ง์ญ์ง๊ตฌ๊ตฌ์ญ ์กฐํ": self.url_getBrJijiguInfo,
}
# ์๋น์ค ์ ์ ์๋ ์ฌ๋ถ ํ์ธ
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} ์๋น์ค๊ฐ ์ ์ ์๋ํฉ๋๋ค.")
else:
print(f">>> {serviceName} ์๋น์คํค ๋ฏธ๋ฑ๋ก ์ค๋ฅ์
๋๋ค.")
# ์ง์ญ ์ฝ๋ ์ด๊ธฐํ
# ๋ฒ์ ๋ ์ฝ๋ ์ถ์ฒ : https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["ํ์ง์ฌ๋ถ"] == "์กด์ฌ"]
code["๋ฒ์ ๊ตฌ์ฝ๋"] = list(map(lambda a: str(a)[:5], list(code["๋ฒ์ ๋์ฝ๋"])))
self.code = code
def CodeFinder(self, name):
"""
๊ตญํ ๊ตํต๋ถ ์ค๊ฑฐ๋๊ฐ ์ ๋ณด ์คํAPI๋ ๋ฒ์ ๋์ฝ๋ 10์๋ฆฌ ์ค ์ 5์๋ฆฌ์ธ ๊ตฌ๋ฅผ ๋ํ๋ด๋ ์ง์ญ์ฝ๋๋ฅผ ์ฌ์ฉํฉ๋๋ค.
API์ ์ฌ์ฉํ ๊ตฌ ๋ณ ์ฝ๋๋ฅผ ์กฐํํ๋ ๋ฉ์๋์ด๋ฉฐ, ๋ฌธ์์ด ์ง์ญ ๋ช
์ ์
๋ ฅ๋ฐ๊ณ , ์กฐํ ๊ฒฐ๊ณผ๋ฅผ Pandas DataFrameํ์์ผ๋ก ์ถ๋ ฅํฉ๋๋ค.
"""
result = self.code[self.code["๋ฒ์ ๋๋ช
"].str.contains(name)][[
"๋ฒ์ ๋๋ช
", "๋ฒ์ ๊ตฌ์ฝ๋"
]]
result.index = range(len(result))
return result
def ChangeCols(self, df, operationName):
"""
์๋ฌธ ์ปฌ๋ผ๋ช
์ ๊ตญ๋ฌธ ์ปฌ๋ผ๋ช
์ผ๋ก ๋ณ๊ฒฝ
"""
if operationName == "getBrBasisOulnInfo":
self.colDict = {
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"bylotCnt": "์ธํ์ง์",
"crtnDay": "์์ฑ์ผ์",
"guyukCd": "๊ตฌ์ญ์ฝ๋",
"guyukCdNm": "๊ตฌ์ญ์ฝ๋๋ช
",
"ji": "์ง",
"jiguCd": "์ง๊ตฌ์ฝ๋",
"jiguCdNm": "์ง๊ตฌ์ฝ๋๋ช
",
"jiyukCd": "์ง์ญ์ฝ๋",
"jiyukCdNm": "์ง์ญ์ฝ๋๋ช
",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"mgmUpBldrgstPk": "๊ด๋ฆฌ์์๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
}
elif operationName == "getBrRecapTitleInfo":
self.colDict = {
"archArea": "๊ฑด์ถ๋ฉด์ ",
"atchBldArea": "๋ถ์๊ฑด์ถ๋ฌผ๋ฉด์ ",
"atchBldCnt": "๋ถ์๊ฑด์ถ๋ฌผ์",
"bcRat": "๊ฑดํ์จ",
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"bylotCnt": "์ธํ์ง์",
"crtnDay": "์์ฑ์ผ์",
"engrEpi": "EPI์ ์",
"engrGrade": "์๋์งํจ์จ๋ฑ๊ธ",
"engrRat": "์๋์ง์ ๊ฐ์จ",
"etcPurps": "๊ธฐํ์ฉ๋",
"fmlyCnt": "๊ฐ๊ตฌ์",
"gnBldCert": "์นํ๊ฒฝ๊ฑด์ถ๋ฌผ์ธ์ฆ์ ์",
"gnBldGrade": "์นํ๊ฒฝ๊ฑด์ถ๋ฌผ๋ฑ๊ธ",
"hhldCnt": "์ธ๋์",
"hoCnt": "ํธ์",
"indrAutoArea": "์ฅ๋ด์์ฃผ์๋ฉด์ ",
"indrAutoUtcnt": "์ฅ๋ด์์ฃผ์๋์",
"indrMechArea": "์ฅ๋ด๊ธฐ๊ณ์๋ฉด์ ",
"indrMechUtcnt": "์ฅ๋ด๊ธฐ๊ณ์๋์",
"itgBldCert": "์ง๋ฅํ๊ฑด์ถ๋ฌผ์ธ์ฆ์ ์",
"itgBldGrade": "์ง๋ฅํ๊ฑด์ถ๋ฌผ๋ฑ๊ธ",
"ji": "์ง",
"lot": "๋กํธ",
"mainBldCnt": "์ฃผ๊ฑด์ถ๋ฌผ์",
"mainPurpsCd": "์ฃผ์ฉ๋์ฝ๋",
"mainPurpsCdNm": "์ฃผ์ฉ๋์ฝ๋๋ช
",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newOldRegstrGbCd": "์ ๊ตฌ๋์ฅ๊ตฌ๋ถ์ฝ๋",
"newOldRegstrGbCdNm": "์ ๊ตฌ๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"oudrAutoArea": "์ฅ์ธ์์ฃผ์๋ฉด์ ",
"oudrAutoUtcnt": "์ฅ์ธ์์ฃผ์๋์",
"oudrMechArea": "์ฅ์ธ๊ธฐ๊ณ์๋ฉด์ ",
"oudrMechUtcnt": "์ฅ์ธ๊ธฐ๊ณ์๋์",
"platArea": "๋์ง๋ฉด์ ",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"pmsDay": "ํ๊ฐ์ผ",
"pmsnoGbCd": "ํ๊ฐ๋ฒํธ๊ตฌ๋ถ์ฝ๋",
"pmsnoGbCdNm": "ํ๊ฐ๋ฒํธ๊ตฌ๋ถ์ฝ๋๋ช
",
"pmsnoKikCd": "ํ๊ฐ๋ฒํธ๊ธฐ๊ด์ฝ๋",
"pmsnoKikCdNm": "ํ๊ฐ๋ฒํธ๊ธฐ๊ด์ฝ๋๋ช
",
"pmsnoYear": "ํ๊ฐ๋ฒํธ๋
",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
"stcnsDay": "์ฐฉ๊ณต์ผ",
"totArea": "์ฐ๋ฉด์ ",
"totPkngCnt": "์ด์ฃผ์ฐจ์",
"useAprDay": "์ฌ์ฉ์น์ธ์ผ",
"vlRat": "์ฉ์ ๋ฅ ",
"vlRatEstmTotArea": "์ฉ์ ๋ฅ ์ฐ์ ์ฐ๋ฉด์ ",
}
elif operationName == "getBrTitleInfo":
self.colDict = {
"archArea": "๊ฑด์ถ๋ฉด์ ",
"atchBldArea": "๋ถ์๊ฑด์ถ๋ฌผ๋ฉด์ ",
"atchBldCnt": "๋ถ์๊ฑด์ถ๋ฌผ์",
"bcRat": "๊ฑดํ์จ",
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"bylotCnt": "์ธํ์ง์",
"crtnDay": "์์ฑ์ผ์",
"dongNm": "๋๋ช
์นญ",
"emgenUseElvtCnt": "๋น์์ฉ์น๊ฐ๊ธฐ์",
"engrEpi": "EPI์ ์",
"engrGrade": "์๋์งํจ์จ๋ฑ๊ธ",
"engrRat": "์๋์ง์ ๊ฐ์จ",
"etcPurps": "๊ธฐํ์ฉ๋",
"etcRoof": "๊ธฐํ์ง๋ถ",
"etcStrct": "๊ธฐํ๊ตฌ์กฐ",
"fmlyCnt": "๊ฐ๊ตฌ์",
"gnBldCert": "์นํ๊ฒฝ๊ฑด์ถ๋ฌผ์ธ์ฆ์ ์",
"gnBldGrade": "์นํ๊ฒฝ๊ฑด์ถ๋ฌผ๋ฑ๊ธ",
"grndFlrCnt": "์ง์์ธต์",
"heit": "๋์ด",
"hhldCnt": "์ธ๋์",
"hoCnt": "ํธ์",
"indrAutoArea": "์ฅ๋ด์์ฃผ์๋ฉด์ ",
"indrAutoUtcnt": "์ฅ๋ด์์ฃผ์๋์",
"indrMechArea": "์ฅ๋ด๊ธฐ๊ณ์๋ฉด์ ",
"indrMechUtcnt": "์ฅ๋ด๊ธฐ๊ณ์๋์",
"itgBldCert": "์ง๋ฅํ๊ฑด์ถ๋ฌผ์ธ์ฆ์ ์",
"itgBldGrade": "์ง๋ฅํ๊ฑด์ถ๋ฌผ๋ฑ๊ธ",
"ji": "์ง",
"lot": "๋กํธ",
"mainAtchGbCd": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋",
"mainAtchGbCdNm": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋๋ช
",
"mainPurpsCd": "์ฃผ์ฉ๋์ฝ๋",
"mainPurpsCdNm": "์ฃผ์ฉ๋์ฝ๋๋ช
",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"oudrAutoArea": "์ฅ์ธ์์ฃผ์๋ฉด์ ",
"oudrAutoUtcnt": "์ฅ์ธ์์ฃผ์๋์",
"oudrMechArea": "์ฅ์ธ๊ธฐ๊ณ์๋ฉด์ ",
"oudrMechUtcnt": "์ฅ์ธ๊ธฐ๊ณ์๋์",
"platArea": "๋์ง๋ฉด์ ",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"pmsDay": "ํ๊ฐ์ผ",
"pmsnoGbCd": "ํ๊ฐ๋ฒํธ๊ตฌ๋ถ์ฝ๋",
"pmsnoGbCdNm": "ํ๊ฐ๋ฒํธ๊ตฌ๋ถ์ฝ๋๋ช
",
"pmsnoKikCd": "ํ๊ฐ๋ฒํธ๊ธฐ๊ด์ฝ๋",
"pmsnoKikCdNm": "ํ๊ฐ๋ฒํธ๊ธฐ๊ด์ฝ๋๋ช
",
"pmsnoYear": "ํ๊ฐ๋ฒํธ๋
",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rideUseElvtCnt": "์น์ฉ์น๊ฐ๊ธฐ์",
"rnum": "์๋ฒ",
"roofCd": "์ง๋ถ์ฝ๋",
"roofCdNm": "์ง๋ถ์ฝ๋๋ช
",
"rserthqkAblty": "๋ด์ง ๋ฅ๋ ฅ",
"rserthqkDsgnApplyYn": "๋ด์ง ์ค๊ณ ์ ์ฉ ์ฌ๋ถ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
"stcnsDay": "์ฐฉ๊ณต์ผ",
"strctCd": "๊ตฌ์กฐ์ฝ๋",
"strctCdNm": "๊ตฌ์กฐ์ฝ๋๋ช
",
"totArea": "์ฐ๋ฉด์ ",
"totDongTotArea": "์ด๋์ฐ๋ฉด์ ",
"ugrndFlrCnt": "์งํ์ธต์",
"useAprDay": "์ฌ์ฉ์น์ธ์ผ",
"vlRat": "์ฉ์ ๋ฅ ",
"vlRatEstmTotArea": "์ฉ์ ๋ฅ ์ฐ์ ์ฐ๋ฉด์ ",
}
elif operationName == "getBrFlrOulnInfo":
self.colDict = colDict = {
"area": "๋ฉด์ ",
"areaExctYn": "๋ฉด์ ์ ์ธ์ฌ๋ถ",
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"crtnDay": "์์ฑ์ผ์",
"dongNm": "๋๋ช
์นญ",
"etcPurps": "๊ธฐํ์ฉ๋",
"etcStrct": "๊ธฐํ๊ตฌ์กฐ",
"flrGbCd": "์ธต๊ตฌ๋ถ์ฝ๋",
"flrGbCdNm": "์ธต๊ตฌ๋ถ์ฝ๋๋ช
",
"flrNo": "์ธต๋ฒํธ",
"flrNoNm": "์ธต๋ฒํธ๋ช
",
"ji": "์ง",
"lot": "๋กํธ",
"mainAtchGbCd": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋",
"mainAtchGbCdNm": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋๋ช
",
"mainPurpsCd": "์ฃผ์ฉ๋์ฝ๋",
"mainPurpsCdNm": "์ฃผ์ฉ๋์ฝ๋๋ช
",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
"strctCd": "๊ตฌ์กฐ์ฝ๋",
"strctCdNm": "๊ตฌ์กฐ์ฝ๋๋ช
",
}
elif operationName == "getBrAtchJibunInfo":
self.colDict = colDict = {
"atchBjdongCd": "๋ถ์๋ฒ์ ๋์ฝ๋",
"atchBlock": "๋ถ์๋ธ๋ก",
"atchBun": "๋ถ์๋ฒ",
"atchEtcJibunNm": "๋ถ์๊ธฐํ์ง๋ฒ๋ช
",
"atchJi": "๋ถ์์ง",
"atchLot": "๋ถ์๋กํธ",
"atchPlatGbCd": "๋ถ์๋์ง๊ตฌ๋ถ์ฝ๋",
"atchRegstrGbCd": "๋ถ์๋์ฅ๊ตฌ๋ถ์ฝ๋",
"atchRegstrGbCdNm": "๋ถ์๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"atchSigunguCd": "๋ถ์์๊ตฐ๊ตฌ์ฝ๋",
"atchSplotNm": "๋ถ์ํน์์ง๋ช
",
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"crtnDay": "์์ฑ์ผ์",
"ji": "์ง",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
}
elif operationName == "getBrExposPubuseAreaInfo":
self.colDict = colDict = {
"area": "๋ฉด์ ",
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"crtnDay": "์์ฑ์ผ์",
"dongNm": "๋๋ช
์นญ",
"etcPurps": "๊ธฐํ์ฉ๋",
"etcStrct": "๊ธฐํ๊ตฌ์กฐ",
"exposPubuseGbCd": "์ ์ ๊ณต์ฉ๊ตฌ๋ถ์ฝ๋",
"exposPubuseGbCdNm": "์ ์ ๊ณต์ฉ๊ตฌ๋ถ์ฝ๋๋ช
",
"flrGbCd": "์ธต๊ตฌ๋ถ์ฝ๋",
"flrGbCdNm": "์ธต๊ตฌ๋ถ์ฝ๋๋ช
",
"flrNo": "์ธต๋ฒํธ",
"flrNoNm": "์ธต๋ฒํธ๋ช
",
"hoNm": "ํธ๋ช
์นญ",
"ji": "์ง",
"lot": "๋กํธ",
"mainAtchGbCd": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋",
"mainAtchGbCdNm": "์ฃผ๋ถ์๊ตฌ๋ถ์ฝ๋๋ช
",
"mainPurpsCd": "์ฃผ์ฉ๋์ฝ๋",
"mainPurpsCdNm": "์ฃผ์ฉ๋์ฝ๋๋ช
",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
"strctCd": "๊ตฌ์กฐ์ฝ๋",
"strctCdNm": "๊ตฌ์กฐ์ฝ๋๋ช
",
}
elif operationName == "getBrWclfInfo":
self.colDict = colDict = {
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"capaLube": "์ฉ๋(๋ฃจ๋ฒ )",
"capaPsper": "์ฉ๋(์ธ์ฉ)",
"crtnDay": "์์ฑ์ผ์",
"etcMode": "๊ธฐํํ์",
"ji": "์ง",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"modeCd": "ํ์์ฝ๋",
"modeCdNm": "ํ์์ฝ๋๋ช
",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
"unitGbCd": "๋จ์๊ตฌ๋ถ์ฝ๋",
"unitGbCdNm": "๋จ์๊ตฌ๋ถ์ฝ๋๋ช
",
}
elif operationName == "getBrHsprcInfo":
self.colDict = colDict = {
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"bylotCnt": "์ธํ์ง์",
"crtnDay": "์์ฑ์ผ์",
"hsprc": "์ฃผํ๊ฐ๊ฒฉ",
"ji": "์ง",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
}
elif operationName == "getBrExposInfo":
self.colDict = colDict = {
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"bldNm": "๊ฑด๋ฌผ๋ช
",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"crtnDay": "์์ฑ์ผ์",
"dongNm": "๋๋ช
์นญ",
"flrGbCd": "์ธต๊ตฌ๋ถ์ฝ๋",
"flrGbCdNm": "์ธต๊ตฌ๋ถ์ฝ๋๋ช
",
"flrNo": "์ธต๋ฒํธ",
"hoNm": "ํธ๋ช
์นญ",
"ji": "์ง",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"naBjdongCd": "์์ฃผ์๋ฒ์ ๋์ฝ๋",
"naMainBun": "์์ฃผ์๋ณธ๋ฒ",
"naRoadCd": "์์ฃผ์๋๋ก์ฝ๋",
"naSubBun": "์์ฃผ์๋ถ๋ฒ",
"naUgrndCd": "์์ฃผ์์ง์์งํ์ฝ๋",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"regstrGbCd": "๋์ฅ๊ตฌ๋ถ์ฝ๋",
"regstrGbCdNm": "๋์ฅ๊ตฌ๋ถ์ฝ๋๋ช
",
"regstrKindCd": "๋์ฅ์ข
๋ฅ์ฝ๋",
"regstrKindCdNm": "๋์ฅ์ข
๋ฅ์ฝ๋๋ช
",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
}
elif operationName == "getBrJijiguInfo":
self.colDict = colDict = {
"bjdongCd": "๋ฒ์ ๋์ฝ๋",
"block": "๋ธ๋ก",
"bun": "๋ฒ",
"crtnDay": "์์ฑ์ผ์",
"etcJijigu": "๊ธฐํ์ง์ญ์ง๊ตฌ๊ตฌ์ญ",
"ji": "์ง",
"jijiguCd": "์ง์ญ์ง๊ตฌ๊ตฌ์ญ์ฝ๋",
"jijiguCdNm": "์ง์ญ์ง๊ตฌ๊ตฌ์ญ์ฝ๋๋ช
",
"jijiguGbCd": "์ง์ญ์ง๊ตฌ๊ตฌ์ญ๊ตฌ๋ถ์ฝ๋",
"jijiguGbCdNm": "์ง์ญ์ง๊ตฌ๊ตฌ์ญ๊ตฌ๋ถ์ฝ๋๋ช
",
"lot": "๋กํธ",
"mgmBldrgstPk": "๊ด๋ฆฌ๊ฑด์ถ๋ฌผ๋์ฅPK",
"newPlatPlc": "๋๋ก๋ช
๋์ง์์น",
"platGbCd": "๋์ง๊ตฌ๋ถ์ฝ๋",
"platPlc": "๋์ง์์น",
"reprYn": "๋ํ์ฌ๋ถ",
"rnum": "์๋ฒ",
"sigunguCd": "์๊ตฐ๊ตฌ์ฝ๋",
"splotNm": "ํน์์ง๋ช
",
}
df = df.rename(columns=self.colDict)
return df
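    # Added usage sketch (commented out; not from the original source). Assuming a
    # valid service key, a building-register query followed by column renaming:
    #
    #     bd = Building(serviceKey="<your key>")
    #     df = bd.getBrTitleInfo(sigunguCd_="11680", bjdongCd_="10300",
    #                            platGbCd_="0", bun_="0012", ji_="0000")
    #     df = bd.ChangeCols(df, "getBrTitleInfo")
    #
    # The codes above are illustrative placeholders, not verified values.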
def getBrBasisOulnInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
01 ๊ฑด์ถ๋ฌผ๋์ฅ ๊ธฐ๋ณธ๊ฐ์ ์กฐํ
์
๋ ฅ: ์๊ตฐ๊ตฌ์ฝ๋, ๋ฒ์ ๋์ฝ๋, ๋์ง๊ตฌ๋ถ์ฝ๋, ๋ฒ, ์ง
"""
# URL
url = f"{self.url_getBrBasisOulnInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"bjdongCd",
"bldNm",
"block",
"bun",
"bylotCnt",
"crtnDay",
"guyukCd",
"guyukCdNm",
"ji",
"jiguCd",
"jiguCdNm",
"jiyukCd",
"jiyukCdNm",
"lot",
"mgmBldrgstPk",
"mgmUpBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
bjdongCd,
bldNm,
block,
bun,
bylotCnt,
crtnDay,
guyukCd,
guyukCdNm,
ji,
jiguCd,
jiguCdNm,
jiyukCd,
jiyukCdNm,
lot,
mgmBldrgstPk,
mgmUpBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# ์ ์ ์์ฒญ์ ์๋ฌ ๋ฐ์ -> Python ์ฝ๋ ์๋ฌ
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Open API ์๋น์ค ์ ๊ณต์ฒ ์ค๋ฅ
else:
print(">>> Open API Error: {}".format(te[0].find["resultMsg"]))
def getBrRecapTitleInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
02 ๊ฑด์ถ๋ฌผ๋์ฅ ์ด๊ดํ์ ๋ถ ์กฐํ
์
๋ ฅ: ์๊ตฐ๊ตฌ์ฝ๋, ๋ฒ์ ๋์ฝ๋, ๋์ง๊ตฌ๋ถ์ฝ๋, ๋ฒ, ์ง, ๊ฒ์์์์ผ, ๊ฒ์์ข
๋ฃ์ผ
"""
# URL
url = f"{self.url_getBrRecapTitleInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"archArea",
"atchBldArea",
"atchBldCnt",
"bcRat",
"bjdongCd",
"bldNm",
"block",
"bun",
"bylotCnt",
"crtnDay",
"engrEpi",
"engrGrade",
"engrRat",
"etcPurps",
"fmlyCnt",
"gnBldCert",
"gnBldGrade",
"hhldCnt",
"hoCnt",
"indrAutoArea",
"indrAutoUtcnt",
"indrMechArea",
"indrMechUtcnt",
"itgBldCert",
"itgBldGrade",
"ji",
"lot",
"mainBldCnt",
"mainPurpsCd",
"mainPurpsCdNm",
"mgmBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newOldRegstrGbCd",
"newOldRegstrGbCdNm",
"newPlatPlc",
"oudrAutoArea",
"oudrAutoUtcnt",
"oudrMechArea",
"oudrMechUtcnt",
"platArea",
"platGbCd",
"platPlc",
"pmsDay",
"pmsnoGbCd",
"pmsnoGbCdNm",
"pmsnoKikCd",
"pmsnoKikCdNm",
"pmsnoYear",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
"stcnsDay",
"totArea",
"totPkngCnt",
"useAprDay",
"vlRat",
"vlRatEstmTotArea",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
archArea,
atchBldArea,
atchBldCnt,
bcRat,
bjdongCd,
bldNm,
block,
bun,
bylotCnt,
crtnDay,
engrEpi,
engrGrade,
engrRat,
etcPurps,
fmlyCnt,
gnBldCert,
gnBldGrade,
hhldCnt,
hoCnt,
indrAutoArea,
indrAutoUtcnt,
indrMechArea,
indrMechUtcnt,
itgBldCert,
itgBldGrade,
ji,
lot,
mainBldCnt,
mainPurpsCd,
mainPurpsCdNm,
mgmBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newOldRegstrGbCd,
newOldRegstrGbCdNm,
newPlatPlc,
oudrAutoArea,
oudrAutoUtcnt,
oudrMechArea,
oudrMechUtcnt,
platArea,
platGbCd,
platPlc,
pmsDay,
pmsnoGbCd,
pmsnoGbCdNm,
pmsnoKikCd,
pmsnoKikCdNm,
pmsnoYear,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
stcnsDay,
totArea,
totPkngCnt,
useAprDay,
vlRat,
vlRatEstmTotArea,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrTitleInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
        03 Building register title section lookup: getBrTitleInfo
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date
"""
# URL
url = f"{self.url_getBrTitleInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"archArea",
"atchBldArea",
"atchBldCnt",
"bcRat",
"bjdongCd",
"bldNm",
"block",
"bun",
"bylotCnt",
"crtnDay",
"dongNm",
"emgenUseElvtCnt",
"engrEpi",
"engrGrade",
"engrRat",
"etcPurps",
"etcRoof",
"etcStrct",
"fmlyCnt",
"gnBldCert",
"gnBldGrade",
"grndFlrCnt",
"heit",
"hhldCnt",
"hoCnt",
"indrAutoArea",
"indrAutoUtcnt",
"indrMechArea",
"indrMechUtcnt",
"itgBldCert",
"itgBldGrade",
"ji",
"lot",
"mainAtchGbCd",
"mainAtchGbCdNm",
"mainPurpsCd",
"mainPurpsCdNm",
"mgmBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"oudrAutoArea",
"oudrAutoUtcnt",
"oudrMechArea",
"oudrMechUtcnt",
"platArea",
"platGbCd",
"platPlc",
"pmsDay",
"pmsnoGbCd",
"pmsnoGbCdNm",
"pmsnoKikCd",
"pmsnoKikCdNm",
"pmsnoYear",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rideUseElvtCnt",
"rnum",
"roofCd",
"roofCdNm",
"rserthqkAblty",
"rserthqkDsgnApplyYn",
"sigunguCd",
"splotNm",
"stcnsDay",
"strctCd",
"strctCdNm",
"totArea",
"totDongTotArea",
"ugrndFlrCnt",
"useAprDay",
"vlRat",
"vlRatEstmTotArea",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
archArea,
atchBldArea,
atchBldCnt,
bcRat,
bjdongCd,
bldNm,
block,
bun,
bylotCnt,
crtnDay,
dongNm,
emgenUseElvtCnt,
engrEpi,
engrGrade,
engrRat,
etcPurps,
etcRoof,
etcStrct,
fmlyCnt,
gnBldCert,
gnBldGrade,
grndFlrCnt,
heit,
hhldCnt,
hoCnt,
indrAutoArea,
indrAutoUtcnt,
indrMechArea,
indrMechUtcnt,
itgBldCert,
itgBldGrade,
ji,
lot,
mainAtchGbCd,
mainAtchGbCdNm,
mainPurpsCd,
mainPurpsCdNm,
mgmBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
oudrAutoArea,
oudrAutoUtcnt,
oudrMechArea,
oudrMechUtcnt,
platArea,
platGbCd,
platPlc,
pmsDay,
pmsnoGbCd,
pmsnoGbCdNm,
pmsnoKikCd,
pmsnoKikCdNm,
pmsnoYear,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rideUseElvtCnt,
rnum,
roofCd,
roofCdNm,
rserthqkAblty,
rserthqkDsgnApplyYn,
sigunguCd,
splotNm,
stcnsDay,
strctCd,
strctCdNm,
totArea,
totDongTotArea,
ugrndFlrCnt,
useAprDay,
vlRat,
vlRatEstmTotArea,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrFlrOulnInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
        04 Building register floor-by-floor overview lookup
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date
"""
# URL
url = f"{self.url_getBrFlrOulnInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"area",
"areaExctYn",
"bjdongCd",
"bldNm",
"block",
"bun",
"crtnDay",
"dongNm",
"etcPurps",
"etcStrct",
"flrGbCd",
"flrGbCdNm",
"flrNo",
"flrNoNm",
"ji",
"lot",
"mainAtchGbCd",
"mainAtchGbCdNm",
"mainPurpsCd",
"mainPurpsCdNm",
"mgmBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"rnum",
"sigunguCd",
"splotNm",
"strctCd",
"strctCdNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
area,
areaExctYn,
bjdongCd,
bldNm,
block,
bun,
crtnDay,
dongNm,
etcPurps,
etcStrct,
flrGbCd,
flrGbCdNm,
flrNo,
flrNoNm,
ji,
lot,
mainAtchGbCd,
mainAtchGbCdNm,
mainPurpsCd,
mainPurpsCdNm,
mgmBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
rnum,
sigunguCd,
splotNm,
strctCd,
strctCdNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrAtchJibunInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
        05 Building register attached lot number lookup: getBrAtchJibunInfo
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date
"""
# URL
url = f"{self.url_getBrAtchJibunInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"atchBjdongCd",
"atchBlock",
"atchBun",
"atchEtcJibunNm",
"atchJi",
"atchLot",
"atchPlatGbCd",
"atchRegstrGbCd",
"atchRegstrGbCdNm",
"atchSigunguCd",
"atchSplotNm",
"bjdongCd",
"bldNm",
"block",
"bun",
"crtnDay",
"ji",
"lot",
"mgmBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
atchBjdongCd,
atchBlock,
atchBun,
atchEtcJibunNm,
atchJi,
atchLot,
atchPlatGbCd,
atchRegstrGbCd,
atchRegstrGbCdNm,
atchSigunguCd,
atchSplotNm,
bjdongCd,
bldNm,
block,
bun,
crtnDay,
ji,
lot,
mgmBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrExposPubuseAreaInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
dongNm_="",
hoNm_="",
):
"""
        06 Building register exclusive/common use area lookup: getBrExposPubuseAreaInfo
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date, dong name, ho name
"""
# URL
url = f"{self.url_getBrExposPubuseAreaInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&dongNm={dongNm_}&hoNm={hoNm_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"area",
"bjdongCd",
"bldNm",
"block",
"bun",
"crtnDay",
"dongNm",
"etcPurps",
"etcStrct",
"exposPubuseGbCd",
"exposPubuseGbCdNm",
"flrGbCd",
"flrGbCdNm",
"flrNo",
"flrNoNm",
"hoNm",
"ji",
"lot",
"mainAtchGbCd",
"mainAtchGbCdNm",
"mainPurpsCd",
"mainPurpsCdNm",
"mgmBldrgstPk",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
"strctCd",
"strctCdNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
area,
bjdongCd,
bldNm,
block,
bun,
crtnDay,
dongNm,
etcPurps,
etcStrct,
exposPubuseGbCd,
exposPubuseGbCdNm,
flrGbCd,
flrGbCdNm,
flrNo,
flrNoNm,
hoNm,
ji,
lot,
mainAtchGbCd,
mainAtchGbCdNm,
mainPurpsCd,
mainPurpsCdNm,
mgmBldrgstPk,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
strctCd,
strctCdNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrWclfInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
        07 Building register sewage treatment facility lookup: getBrWclfInfo
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date
"""
# URL
url = f"{self.url_getBrWclfInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"bjdongCd",
"bldNm",
"block",
"bun",
"capaLube",
"capaPsper",
"crtnDay",
"etcMode",
"ji",
"lot",
"mgmBldrgstPk",
"modeCd",
"modeCdNm",
"naBjdongCd",
"naMainBun",
"naRoadCd",
"naSubBun",
"naUgrndCd",
"newPlatPlc",
"platGbCd",
"platPlc",
"regstrGbCd",
"regstrGbCdNm",
"regstrKindCd",
"regstrKindCdNm",
"rnum",
"sigunguCd",
"splotNm",
"unitGbCd",
"unitGbCdNm",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
bjdongCd,
bldNm,
block,
bun,
capaLube,
capaPsper,
crtnDay,
etcMode,
ji,
lot,
mgmBldrgstPk,
modeCd,
modeCdNm,
naBjdongCd,
naMainBun,
naRoadCd,
naSubBun,
naUgrndCd,
newPlatPlc,
platGbCd,
platPlc,
regstrGbCd,
regstrGbCdNm,
regstrKindCd,
regstrKindCdNm,
rnum,
sigunguCd,
splotNm,
unitGbCd,
unitGbCdNm,
]],
columns=variables,
)
df = pd.concat([df, data])
df.index = range(len(df))
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
            # Error occurred on a normal request -> Python code error
            if te[0].find("resultCode").text == "00":
                print(">>> Python Logic Error. e-mail : <EMAIL>")
            # Open API service provider error
            else:
                print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def getBrHsprcInfo(
self,
sigunguCd_,
bjdongCd_,
platGbCd_="",
bun_="",
ji_="",
startDate_="",
endDate_="",
):
"""
        08 Building register housing price lookup: getBrHsprcInfo
        Input: sigungu code, bjdong code, plot type code, bun, ji, search start date, search end date
"""
# URL
url = f"{self.url_getBrHsprcInfo}&sigunguCd={sigunguCd_}&bjdongCd={bjdongCd_}&platGbCd={platGbCd_}&bun={bun_}&ji={ji_}&startDate={startDate_}&endDate={endDate_}&numOfRows=99999"
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = | pd.DataFrame() | pandas.DataFrame |
import gradio as gr
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
df = | pd.read_csv('https://raw.githubusercontent.com/toshihiroryuu/Machine_learning/master/ML_001_Heart_faliure/Dataset/heart_failure_clinical_records.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import os
import seaborn as sns
import matplotlib.dates as mdates
import sys
sys.path.append('../')
from processing_helpers import *
from load_paths import load_box_paths
mpl.rcParams['pdf.fonttype'] = 42
def get_pop_by_age(adjust_for_chicago=True):
"""Population by age per covid region"""
pop_df = pd.read_csv(os.path.join(datapath, "covid_IDPH/population/cc-est2019-agesex-17.csv"))
pop_df = pop_df[pop_df['YEAR']==12] # corresponds to 2019
pop_df['AGE16BELOW_TOT'] = pop_df['POPESTIMATE'] - pop_df['AGE16PLUS_TOT']
pop_df['AGE65BELOW_TOT'] = pop_df['POPESTIMATE'] - pop_df['AGE65PLUS_TOT']
pop_df['16-64'] = pop_df['AGE65BELOW_TOT'] - pop_df['AGE16BELOW_TOT']
pop_df['65+'] = pop_df['AGE65PLUS_TOT']
pop_df['county'] = pop_df['CTYNAME'].str.replace(' County','')
pop_df = pop_df[['county','POPESTIMATE','65+','16-64','MEDIAN_AGE_TOT','AGE16BELOW_TOT']]
if adjust_for_chicago:
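        # Split Chicago out of Cook County using published age-share estimates so it can be treated as its own covid region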
chicago_pop = 2456274
chicago_perc_pop_below16 = 0.1758 # below 15 due to agebins..
chicago_perc_pop_16to65 = 0.70174 # estimated from https://www.chicagohealthatlas.org/indicators/total-population
chicago_perc_pop_above65 = 0.1224
chicago_pop_below16 = int(round(chicago_pop * chicago_perc_pop_below16,0))
chicago_pop_16to65 = int(round(chicago_pop * chicago_perc_pop_16to65,0))
chicago_pop_above65 = int(round(chicago_pop * chicago_perc_pop_above65,0))
pop_df[pop_df['county'] == 'Cook']
chicago_df = {'county': ['Chicago'], 'POPESTIMATE': [chicago_pop],
'65+': [chicago_pop_above65], '16-64': [chicago_pop_16to65],
'MEDIAN_AGE_TOT' : [-9],'AGE16BELOW_TOT':[chicago_pop_below16] }
chicago_df = pd.DataFrame(data=chicago_df)
cook_df = pop_df[pop_df['county'] == 'Cook']
cook_df['POPESTIMATE'] = cook_df['POPESTIMATE'] - chicago_pop
cook_df['65+'] = cook_df['65+'] - chicago_pop_above65
cook_df['16-64'] = cook_df['16-64'] - chicago_pop_16to65
cook_df['AGE16BELOW_TOT'] = cook_df['AGE16BELOW_TOT'] - chicago_pop_below16
cook_chicago_df = cook_df.append(chicago_df)
pop_df = pop_df[pop_df['county'] != 'Cook']
pop_df = pop_df.append(cook_chicago_df).reset_index()
    ### Chicago (region 11 missing)
pop_df = merge_county_covidregions(pop_df, key_x='county', add_pop=False)
#pop_df.groupby(['covid_region'])[['MEDIAN_AGE_TOT']].agg([np.min, np.mean, np.max] ).reset_index()
pop_df = pop_df.groupby(['covid_region'])[['POPESTIMATE','AGE16BELOW_TOT', '65+', '16-64']].agg(np.nansum).reset_index()
pop_df_i = pd.melt(pop_df, id_vars=['covid_region'], value_vars=['65+', '16-64'])
pop_df_i.rename(columns={"variable": "agegrp"}, inplace=True)
pop_df_i.rename(columns={"value": "population"}, inplace=True)
pop_df['16-64'] = pop_df['16-64'] / pop_df['POPESTIMATE']
pop_df['65+'] = pop_df['65+'] / pop_df['POPESTIMATE']
pop_df_ii = pd.melt(pop_df, id_vars=['covid_region'], value_vars=['65+', '16-64'])
pop_df_ii.rename(columns={"variable": "agegrp"}, inplace=True)
pop_df_ii.rename(columns={"value": "pop_perc"}, inplace=True)
df = | pd.merge(pop_df_i, pop_df_ii) | pandas.merge |
import pandas as pd
from SALib.sample.radial.radial_sobol import sample
from .settings import * # import project-specific settings
# read in previous sample set for a single climate scenario
# we use this as a template
df = pd.read_csv(indir+'example_sample.csv', index_col=0)
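# Identify which columns vary across the template sample (perturbed) and which stay constant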
is_perturbed = (df != df.iloc[0]).any()
perturbed_cols = df.loc[:, is_perturbed].columns
constant_cols = df.loc[:, ~is_perturbed].columns
cat_cols = df.select_dtypes('object').columns
p_df = df.copy()
for col in cat_cols:
p_df[col] = p_df[col].astype('category')
perturbed = p_df.loc[:, is_perturbed]
for col in perturbed_cols:
if col in cat_cols:
perturbed[col] = perturbed[col].cat.codes
bounds = list(zip(perturbed.min().tolist(), perturbed.max().tolist()))
problem = {
'num_vars': len(perturbed_cols),
'names': perturbed_cols,
'bounds': bounds
}
# Create Saltelli samples
# SALib expects purely numeric values so categoricals are transformed as such
samples = sample(problem, 10, seed=101)
# Create template
rsobol_df = df.iloc[0][constant_cols].to_frame().T
rsobol_df = pd.concat([rsobol_df]*len(samples), ignore_index=True)
df_samples = | pd.DataFrame(data=samples, columns=perturbed_cols) | pandas.DataFrame |
import numpy as np
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import Span, Range1d
import random
from math import pi
def calculate_diff(b, m, stats_df, group_names, groups_dict, mean_or_med=0):
"""
calculate difference from reference group
:param b: name of the population (bubble)
:param m: name of the marker
:param stats_df: dataframe with all measurements and marker values for every population
:param group_names: names of patient groups
:param groups_dict: dict with groups data
:param mean_or_med: 0- mean, 1-median
:return: dataframe with difference from reference group for every group
"""
diff_df = pd.DataFrame(index=list([tab.title for tab in group_names]), columns=['diff'])
reference_level = 0
for g in groups_dict:
measurements = g[1]['measurements'].tolist()
if g[1]['patient'].values.tolist()[0] == 'healthy':
if mean_or_med == 0:
reference_level = np.mean(list([stats_df.loc[measurement, (b, m)] for measurement in measurements]))
else:
reference_level = np.median(list([stats_df.loc[measurement, (b, m)] for measurement in measurements]))
break
for idx, g in enumerate(groups_dict):
measurements = g[1]['measurements'].tolist()
if mean_or_med == 0:
group_level = np.mean(list([stats_df.loc[measurement, (b, m)] for measurement in measurements]))
else:
group_level = np.median(list([stats_df.loc[measurement, (b, m)] for measurement in measurements]))
diff_df.loc[group_names[idx].title, 'diff'] = group_level - reference_level
return diff_df
def generate_random_color():
return '#%02X%02X%02X' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def diff_plot(df_diff= | pd.DataFrame() | pandas.DataFrame |
"""
Prepare training and testing datasets as CSV dictionaries
Created on 11/26/2018
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def tile_ids_in(slide, level, root_dir, label):
ids = []
try:
for id in os.listdir(root_dir):
if '.png' in id:
ids.append([slide, level, root_dir+'/'+id, label])
else:
print('Skipping ID:', id)
except FileNotFoundError:
print('Ignore:', root_dir)
return ids
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'level', 'path', 'label'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
return balanced
# Get all svs images with its label as one file; level is the tile resolution level
def big_image_sum(pmd, path='../tiles/', ref_file='../Fusion_dummy_His_MUT_joined.csv'):
if not os.path.isdir(path):
os.mkdir(path)
import Cutter
Cutter.cut()
allimg = image_ids_in(path)
ref = pd.read_csv(ref_file, header=0)
big_images = []
for level in range(4):
level = str(level)
if pmd == 'subtype':
MSIimg = intersection(ref.loc[ref['subtype_MSI'] == 1]['name'].tolist(), allimg)
EMimg = intersection(ref.loc[ref['subtype_Endometrioid'] == 1]['name'].tolist(), allimg)
SLimg = intersection(ref.loc[ref['subtype_Serous-like'] == 1]['name'].tolist(), allimg)
POLEimg = intersection(ref.loc[ref['subtype_POLE'] == 1]['name'].tolist(), allimg)
for i in MSIimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
for i in EMimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 2])
for i in SLimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 3])
for i in POLEimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
elif pmd == 'histology':
allimg = intersection(ref.loc[ref['histology_Mixed'] == 0]['name'].tolist(), allimg)
EMimg = intersection(ref.loc[ref['histology_Endometrioid'] == 1]['name'].tolist(), allimg)
Serousimg = intersection(ref.loc[ref['histology_Serous'] == 1]['name'].tolist(), allimg)
for i in EMimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
for i in Serousimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
ref = ref.loc[ref['subtype_0NA'] == 0]
### special version
# ref = ref.loc[ref['histology_Mixed'] == 0]
# ref = ref.loc[ref['histology_Endometrioid'] == 1]
### special version
negimg = intersection(ref.loc[ref['subtype_{}'.format(pmd)] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref['subtype_{}'.format(pmd)] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
for i in posimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
elif pmd == 'MSIst':
Himg = intersection(ref.loc[ref['MSIst_MSI-H'] == 1]['name'].tolist(), allimg)
Simg = intersection(ref.loc[ref['MSIst_MSS'] == 1]['name'].tolist(), allimg)
for i in Himg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
for i in Simg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
elif pmd in ['MSIst_MSI-H', 'MSIst_MSI-L', 'MSIst_MSS']:
ref = ref.loc[ref['MSIst_0NA'] == 0]
            negimg = intersection(ref.loc[ref[pmd] == 0]['name'].tolist(), allimg)
            posimg = intersection(ref.loc[ref[pmd] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
for i in posimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
else:
negimg = intersection(ref.loc[ref[pmd] == 0]['name'].tolist(), allimg)
posimg = intersection(ref.loc[ref[pmd] == 1]['name'].tolist(), allimg)
for i in negimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 0])
for i in posimg:
big_images.append([i, level, path + "{}/level{}".format(i, level), 1])
datapd = pd.DataFrame(big_images, columns=['slide', 'level', 'path', 'label'])
return datapd
# TO KEEP SPLIT SAME AS BASELINES. seperate into training and testing; each type is the same separation
# ratio on big images test and train csv files contain tiles' path.
def set_sep_secondary(alll, path, cls, pmd, level=None, batchsize=64):
if level:
alll = alll[alll.level == level]
if pmd == 'subtype':
split = pd.read_csv('../split/ST.csv', header=0)
elif pmd == 'histology':
split = pd.read_csv('../split/his.csv', header=0)
elif pmd == 'Serous-like':
split = pd.read_csv('../split/CNVH.csv', header=0)
elif pmd == 'Endometrioid':
split = pd.read_csv('../split/CNVL.csv', header=0)
else:
split = pd.read_csv('../split/{}.csv'.format(pmd), header=0)
train = split.loc[split['set'] == 'train']['slide'].tolist()
validation = split.loc[split['set'] == 'validation']['slide'].tolist()
test = split.loc[split['set'] == 'test']['slide'].tolist()
trlist = []
telist = []
valist = []
subset = alll
valist.append(subset[subset['slide'].isin(validation)])
telist.append(subset[subset['slide'].isin(test)])
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles_list = []
train_tiles_list = []
validation_tiles_list = []
for idx, row in test.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
test_tiles_list.extend(tile_ids)
for idx, row in train.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
train_tiles_list.extend(tile_ids)
for idx, row in validation.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
validation_tiles_list.extend(tile_ids)
test_tiles = pd.DataFrame(test_tiles_list, columns=['slide', 'level', 'path', 'label'])
train_tiles = pd.DataFrame(train_tiles_list, columns=['slide', 'level', 'path', 'label'])
validation_tiles = pd.DataFrame(validation_tiles_list, columns=['slide', 'level', 'path', 'label'])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# Training and validation on TCGA; Testing on CPTAC
def set_sep_idp(alll, path, cls, level=None, cut=0.1, batchsize=64):
trlist = []
telist = []
valist = []
if level:
alll = alll[alll.level == level]
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
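    # TCGA slides are split into train/validation per class; CPTAC slides are held out entirely as the test set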
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut)]
valist.append(subset[subset['slide'].isin(validation)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
telist.append(CPTAC)
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles_list = []
train_tiles_list = []
validation_tiles_list = []
for idx, row in test.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
test_tiles_list.extend(tile_ids)
for idx, row in train.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
train_tiles_list.extend(tile_ids)
for idx, row in validation.iterrows():
tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])
validation_tiles_list.extend(tile_ids)
test_tiles = pd.DataFrame(test_tiles_list, columns=['slide', 'level', 'path', 'label'])
train_tiles = pd.DataFrame(train_tiles_list, columns=['slide', 'level', 'path', 'label'])
validation_tiles = | pd.DataFrame(validation_tiles_list, columns=['slide', 'level', 'path', 'label']) | pandas.DataFrame |
import pybitflyer2 as PBF
import pandas as pd
import datetime
import calendar
import time
import pickle
import traceback
pbf = PBF.API()
def main():
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days = 1)
#tday = today
tday = yesterday
start_time = datetime.datetime(tday.year, tday.month, tday.day, 0, 0)
end_time = datetime.datetime(tday.year, tday.month, tday.day, 23, 59)
df = None
last_id = None
while True:
time.sleep(0.2)
try:
df0 = save_executions(last_id, product_code = "FX_BTC_JPY")
except:
print(traceback.format_exc())
continue
last_id = df0.iloc[-1]["id"]
time1 = datetime.datetime.fromtimestamp(df0["unixtime"].iloc[0]) # end
time0 = datetime.datetime.fromtimestamp(df0["unixtime"].iloc[-1]) # start
print(time0, time1, start_time, end_time)
if df is not None:
print(len(df))
else:
print(0)
if time0 > end_time:
continue
elif time1 < start_time:
break
if df is None:
df = df0
else:
df = pd.concat([df, df0], ignore_index=True)
#quit()
start = datetime.datetime.fromtimestamp(df0["unixtime"].iloc[-1])
end = datetime.datetime.fromtimestamp(df0["unixtime"].iloc[0])
fname = str(end).split()[0] + ".pkl"
with open(fname, "wb") as f:
pickle.dump(df, f)
return
def save_executions(last_id=None, product_code = "FX_BTC_JPY"):
#show_board(pbf.board(product_code = "FX_BTC_JPY"))
if last_id is None:
data = pbf.executions(product_code = product_code,
count = 500)
else:
data = pbf.executions(product_code = product_code,
count = 500,
before = last_id)
df1 = | pd.DataFrame(data) | pandas.DataFrame |
import os
import pandas as pd
PATH = 'C:\\Users\\jmedel\\Desktop\\CARDBOARD'
URL_prefix = 'https://sustaynaianotations.blob.core.windows.net/sustaynmechanical/'
filename_lst = os.listdir(PATH)
df = | pd.DataFrame(columns=['image_url']) | pandas.DataFrame |
import os
from numpy.core.numeric import full
import pandas as pd
from feature_computation import Feature
import json
import librosa
from collections import defaultdict
import sys
dataset_mode = sys.argv[1]
print("Dataset mode: {}".format(dataset_mode))
# ***************** PATH CONFIGURATION *****************
# Configuration file
with open('config.json') as config_file:
config = json.load(config_file)
# Import data
if dataset_mode == 'train':
csv_file, root_data, csv_features = config['csv_train_data'], config['root_train_data'], config['train_features']
elif dataset_mode == 'prediction':
csv_file, root_data, csv_features = config['csv_prediction_data'], config['root_prediction_data'], config['prediction_features']
config['middle'] = True
else:
raise Exception("Input '{}' is not valid, the argument 'dataset_mode' can take only two values: 1) 'train' or 2) 'prediction'.".format(dataset_mode))
song_id_list = pd.read_csv(csv_file, index_col='song_id').index.values.tolist() # Getting list of filename songs
# Update features equal to true allows udpating a csv of feature values already computed previously, otherwise an empty dictionary is initialized
if config['update_features']:
features = | pd.read_csv(csv_features, index_col='song_id') | pandas.read_csv |
import os
from cleverhans.attacks import FastGradientMethod
from io import BytesIO
import IPython.display
import numpy as np
import pandas as pd
from PIL import Image
from scipy.misc import imread
from scipy.misc import imsave
import tensorflow as tf
from tensorflow.contrib.slim.nets import inception
slim = tf.contrib.slim
tensorflow_master = ""
checkpoint_path = "../input/inception-v3/inception_v3.ckpt"
input_dir = "../input/nips-2017-adversarial-learning-development-set/images/"
max_epsilon = 16.0
image_width = 299
image_height = 299
batch_size = 16
eps = 2.0 * max_epsilon / 255.0
batch_shape = [batch_size, image_height, image_width, 3]
num_classes = 1001
def load_images(input_dir, batch_shape):
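    # Yield (filenames, images) batches with pixel values scaled to [-1, 1]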
images = np.zeros(batch_shape)
filenames = []
idx = 0
batch_size = batch_shape[0]
for filepath in sorted(tf.gfile.Glob(os.path.join(input_dir, '*.png'))):
with tf.gfile.Open(filepath, "rb") as f:
images[idx, :, :, :] = imread(f, mode='RGB').astype(np.float)*2.0/255.0 - 1.0
filenames.append(os.path.basename(filepath))
idx += 1
if idx == batch_size:
yield filenames, images
filenames = []
images = np.zeros(batch_shape)
idx = 0
if idx > 0:
yield filenames, images
def show_image(a, fmt='png'):
a = np.uint8((a+1.0)/2.0*255.0)
f = BytesIO()
Image.fromarray(a).save(f, fmt)
IPython.display.display(IPython.display.Image(data=f.getvalue()))
class InceptionModel(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self.built = False
def __call__(self, x_input):
"""Constructs model and return probabilities for given input."""
reuse = True if self.built else None
with slim.arg_scope(inception.inception_v3_arg_scope()):
_, end_points = inception.inception_v3(
x_input, num_classes=self.num_classes, is_training=False,
reuse=reuse)
self.built = True
output = end_points['Predictions']
probs = output.op.inputs[0]
return probs
categories = pd.read_csv("../input/nips-2017-adversarial-learning-development-set/categories.csv")
image_classes = pd.read_csv("../input/nips-2017-adversarial-learning-development-set/images.csv")
image_iterator = load_images(input_dir, batch_shape)
# get first batch of images
filenames, images = next(image_iterator)
image_metadata = pd.DataFrame({"ImageId": [f[:-4] for f in filenames]}).merge(image_classes,
on="ImageId")
true_classes = image_metadata["TrueLabel"].tolist()
target_classes = true_labels = image_metadata["TargetClass"].tolist()
true_classes_names = ( | pd.DataFrame({"CategoryId": true_classes}) | pandas.DataFrame |
from typing import Tuple
from argparse import Namespace as APNamespace, _SubParsersAction,ArgumentParser
from train_help import *
from pathlib import Path
import os
import platform
import time
import pandas as pd
import numpy as np
import global_vars as GLOBALS
from ptflops import get_model_complexity_info
import copy
import torch
#DARTS Model files
from model import NetworkCIFAR as Network
from model import NetworkImageNet as Network_ImageNet
import genotypes
from darts_scaling_algorithms import channel_size_adjust_algorithm as DARTS_algorithm
from dartsplus_scaling_algorithms import channel_size_adjust_algorithm as DARTSPlus_algorithm
from shutil import copyfile
import re
"""
Contains training code for DARTS and DARTSPlus
"""
def update_network_DARTS(new_cell_list = None,new_sep_conv_list = None):
if GLOBALS.CONFIG['network'] == 'DARTS':
arch = "DARTS"
elif GLOBALS.CONFIG['network'] == 'DARTSPlus':
arch = "DARTS_PLUS_CIFAR100"
genotype = eval("genotypes.%s" % arch)
if GLOBALS.CONFIG["dataset"] == 'CIFAR10':
fc_dim = 10
elif GLOBALS.CONFIG["dataset"] == 'CIFAR100':
fc_dim = 100
elif GLOBALS.CONFIG["dataset"] == 'ImageNet':
fc_dim = 1000
assert GLOBALS.CONFIG["num_cells"] == 7 or GLOBALS.CONFIG["num_cells"] == 14 or GLOBALS.CONFIG["num_cells"] == 20
if new_cell_list == None:
if GLOBALS.CONFIG["num_cells"] == 20:
if arch == "DARTS":
new_cell_list = GLOBALS.DARTS_cell_list_20
else:
new_cell_list = GLOBALS.DARTSPlus_cell_list_20
elif GLOBALS.CONFIG["num_cells"] == 14:
if arch == "DARTS":
new_cell_list = GLOBALS.DARTS_cell_list_14
else:
new_cell_list = GLOBALS.DARTSPlus_cell_list_14
else:
if arch == "DARTS":
new_cell_list = GLOBALS.DARTS_cell_list_7
else:
new_cell_list = GLOBALS.DARTSPlus_cell_list_7
if new_sep_conv_list == None:
if GLOBALS.CONFIG["num_cells"] == 20:
if arch == "DARTS":
new_sep_conv_list = GLOBALS.DARTS_sep_conv_list_20
else:
new_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_20
elif GLOBALS.CONFIG["num_cells"] == 14:
if arch == "DARTS":
new_sep_conv_list = GLOBALS.DARTS_sep_conv_list_14
else:
new_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_14
else:
if arch == "DARTS":
new_sep_conv_list = GLOBALS.DARTS_sep_conv_list_7
else:
new_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_7
#The 10 is the number of classes in CIFAR10
if GLOBALS.CONFIG["dataset"] == 'CIFAR10' or GLOBALS.CONFIG["dataset"] == 'CIFAR100':
new_network = Network(new_cell_list, new_sep_conv_list, fc_dim, GLOBALS.CONFIG["num_cells"], GLOBALS.CONFIG['auxiliary'], genotype, arch)
elif GLOBALS.CONFIG["dataset"] == 'ImageNet':
new_network = Network_ImageNet(new_cell_list, new_sep_conv_list, fc_dim, GLOBALS.CONFIG["num_cells"],
GLOBALS.CONFIG['auxiliary'], genotype, arch)
print("Cell List:", new_cell_list)
print("Sep Conv List:", new_sep_conv_list)
new_network.drop_path_prob = 0 # Need to update this
return new_network
def find_best_acc_epoch(df):
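    # Return the index of the epoch with the highest recorded test accuracy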
test_accs = list()
cols = [col for col in df if col.startswith('test_acc_epoch')]
for col in cols:
temp = float(df[col][0])*100
test_accs.append(temp)
return np.array(test_accs).argmax()
def create_full_data_file_DARTS(new_network,full_fresh_file,output_path_string_full_train):
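    # Summarize best-epoch accuracy/loss together with GMac/GFlop/parameter counts into the output Excel reports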
parameter_data = pd.DataFrame(columns=['Accuracy (%)','Training Loss','GMacs','GFlops','Parameter Count (M)'])
#full_save_dfs=pd.read_excel(full_save_file)
full_fresh_dfs=pd.read_excel(full_fresh_file)
#final_epoch_save=full_save_dfs.columns[-1][(full_save_dfs.columns[-1].index('epoch_')+6):]
final_epoch_fresh=full_fresh_dfs.columns[-1][(full_fresh_dfs.columns[-1].index('epoch_')+6):]
#best acc
best_epoch_fresh = find_best_acc_epoch(full_fresh_dfs)
#full_save_accuracy = full_save_dfs['test_acc_epoch_'+str(final_epoch_save)][0]*100
full_fresh_accuracy = full_fresh_dfs['test_acc_epoch_'+str(best_epoch_fresh)][0]*100
#full_save_loss = full_save_dfs['train_loss_epoch_'+str(final_epoch_save)][0]
full_fresh_loss = full_fresh_dfs['train_loss_epoch_'+str(best_epoch_fresh)][0]
if GLOBALS.CONFIG['dataset'] == 'CIFAR10' or GLOBALS.CONFIG['dataset'] == 'CIFAR100':
macs, params = get_model_complexity_info(new_network, (3,32,32), as_strings=False,print_per_layer_stat=False)
elif GLOBALS.CONFIG['dataset'] == 'ImageNet':
macs, params = get_model_complexity_info(new_network, (3, 224, 224), as_strings=False,
print_per_layer_stat=False)
#save_parameter_size_list = [full_save_accuracy,full_save_loss,int(macs)/1000000000,2*int(macs)/1000000000,int(params)/1000000]
fresh_parameter_size_list = [full_fresh_accuracy,full_fresh_loss,int(macs)/1000000000,2*int(macs)/1000000000,int(params)/1000000]
#parameter_data.loc[len(parameter_data)] = save_parameter_size_list
parameter_data.loc[len(parameter_data)] = fresh_parameter_size_list
if platform.system() == 'Windows':
parameter_data.to_excel(output_path_string_full_train+'\\'+'adapted_parameters.xlsx')
else:
parameter_data.to_excel(output_path_string_full_train+'/'+'adapted_parameters.xlsx')
# Copied from master
output_file = 'default.xlsx'
if platform.system() == 'Windows':
performance_output_file = output_path_string_full_train + '\\' + 'performance.xlsx'
auxilary_output_file = output_path_string_full_train + '\\' + 'auxilary.xlsx'
else:
performance_output_file = output_path_string_full_train + '/' + 'performance.xlsx'
auxilary_output_file = output_path_string_full_train + '/' + 'auxilary.xlsx'
writer_performance = pd.ExcelWriter(performance_output_file, engine='openpyxl')
wb_per = writer_performance.book
writer_auxilary = pd.ExcelWriter(auxilary_output_file, engine='openpyxl')
wb_aux = writer_auxilary.book
performance_data = {}
auxilary_data = {}
performance_data['Gmac'] = int(macs) / 1000000000
performance_data['GFlop'] = 2 * int(macs) / 1000000000
performance_data['parameter count (M)'] = int(params) / 1000000
num_layer = len(full_fresh_dfs['train_acc_epoch_' + str(0)])
layer_list = list(range(0, num_layer))
auxilary_data['layer_index'] = layer_list
for i in range(int(final_epoch_fresh) + 1):
performance_data['train_acc_epoch_' + str(i) + " (%)"] = [full_fresh_dfs['train_acc_epoch_' + str(i)][0] * 100]
performance_data['train_loss_epoch_' + str(i)] = [full_fresh_dfs['train_loss_epoch_' + str(i)][0]]
performance_data['test_acc_epoch_' + str(i) + " (%)"] = [full_fresh_dfs['test_acc_epoch_' + str(i)][0] * 100]
performance_data['test_loss_epoch_' + str(i)] = [full_fresh_dfs['test_loss_epoch_' + str(i)][0]]
auxilary_data['in_KG_epcho' + str(i)] = full_fresh_dfs['in_S_epoch_' + str(i)]
auxilary_data['out_KG_epcho' + str(i)] = full_fresh_dfs['out_S_epoch_' + str(i)]
auxilary_data['in_rank_epcho' + str(i)] = full_fresh_dfs['in_rank_epoch_' + str(i)]
auxilary_data['out_rank_epcho' + str(i)] = full_fresh_dfs['out_rank_epoch_' + str(i)]
auxilary_data['in_condition_epcho' + str(i)] = full_fresh_dfs['in_condition_epoch_' + str(i)]
auxilary_data['out_condition_epcho' + str(i)] = full_fresh_dfs['out_condition_epoch_' + str(i)]
df_per = pd.DataFrame(performance_data)
df_per.to_excel(writer_performance, index=False)
wb_per.save(performance_output_file)
df_aux = pd.DataFrame(auxilary_data)
df_aux.to_excel(writer_auxilary, index=False)
wb_aux.save(auxilary_output_file)
    if platform.system() == 'Windows':
slash = '\\'
else:
slash = '/'
#Hard coded path, copy into adas search folder
# copyfile(GLOBALS.OUTPUT_PATH_STRING +slash+'..'+slash+'..'+slash+'.adas-checkpoint'+slash+'ckpt.pth', output_path_string_full_train + slash + 'ckpt.pth')
return True
def run_fresh_full_train_DARTS(epochs,output_path_fulltrain, cell_list = None, sep_conv_list = None):
"""
Perform model evaluation for DARTS/DARTS+
"""
GLOBALS.FIRST_INIT = False
#optimizer,scheduler=network_initialize(new_network,train_loader)
parser = ArgumentParser(description=__doc__)
get_args(parser)
args = parser.parse_args()
#Just to build directories. Settings get overwritten below
"""
if cell_list != None:
print ("Full Train Cell Architecture:", cell_list)
else:
if GLOBALS.CONFIG["num_cells"] == 20:
print ("Full Train Cell Architecture:", GLOBALS.cell_list_20)
else:
print ("Full Train Cell Architecture:", GLOBALS.cell_list_7)
if sep_conv_list != None:
print ("Full Train Sep Conv Architecture:", sep_conv_list)
else:
if GLOBALS.CONFIG["num_cells"] == 20:
print("Full Train Cell Architecture:", GLOBALS.sep_conv_list_20)
else:
print("Full Train Cell Architecture:", GLOBALS.sep_conv_list_7)
"""
#Set all DARTS Hyperparamter to true for full train
GLOBALS.CONFIG['drop_path'] = GLOBALS.CONFIG['drop_path_full']
GLOBALS.CONFIG['auxiliary'] = GLOBALS.CONFIG['auxiliary_full']
GLOBALS.CONFIG['cutout'] = GLOBALS.CONFIG['cutout_full']
GLOBALS.CONFIG['grad_clip'] = GLOBALS.CONFIG['grad_clip_full']
GLOBALS.CONFIG['mini_batch_size'] = GLOBALS.CONFIG['mini_batch_size_full']
GLOBALS.CONFIG['weight_decay'] = GLOBALS.CONFIG['weight_decay_full']
new_network = update_network_DARTS(cell_list, sep_conv_list)
train_loader,test_loader,device,optimizer,scheduler, model = initialize(args, new_network,beta=GLOBALS.CONFIG['beta_full'],scheduler=GLOBALS.CONFIG['lr_scheduler_full'], load_config=False)
GLOBALS.FULL_TRAIN = True
GLOBALS.PERFORMANCE_STATISTICS = {}
GLOBALS.FULL_TRAIN_MODE = 'fresh' #
GLOBALS.EXCEL_PATH = ''
run_epochs(0, model, epochs, train_loader, test_loader, device, optimizer, scheduler, output_path_fulltrain)
#Initializing again to remove auxiliary head so it does not get added in param / GMAC count.
print("Running initialize again to remove auxiliary head for param / gmac count")
GLOBALS.CONFIG['auxiliary'] = False
#initialize(args_true,beta= GLOBALS.CONFIG['beta_full'],new_cell_list=cell_list, new_sep_conv_list=sep_conv_list, scheduler="StepLR", load_config=False)
new_network = update_network_DARTS(cell_list, sep_conv_list)
return new_network
def run_trials_DARTS(epochs,output_path_train):
"""
Perform Channel Search for DARTS/DARTS+
"""
cell_list_average_slope, cell_list_prev_ops , cell_list_factor , sep_conv_list_average_slope, sep_conv_list_prev_ops , sep_conv_list_factor, cell_list_rank, sep_conv_list_rank = [],[],[],[],[],[],[],[]
trial_dir = os.path.join(GLOBALS.OUTPUT_PATH_STRING, 'Trials')
print (trial_dir)
parameter_type=GLOBALS.CONFIG['parameter_type']
def initialize_dataframes_and_lists():
#[C0, C1, C2, C3] Sizes
#Default 7 cells
if GLOBALS.CONFIG["num_cells"] == 7:
cell_list_columns = ['STEM', 'cell0', 'cell1', 'cell2', 'cell3', 'cell4', 'cell5', 'cell6']
sep_conv_list_columns = ['cell0', 'cell1', 'cell3', 'cell5', 'cell6']
if GLOBALS.CONFIG['network'] == 'DARTS':
initial_cell_list = GLOBALS.DARTS_cell_list_7
initial_sep_conv_list = GLOBALS.DARTS_sep_conv_list_7
else:
initial_cell_list = GLOBALS.DARTSPlus_cell_list_7
initial_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_7
elif GLOBALS.CONFIG["num_cells"] == 14:
cell_list_columns = ['STEM','cell0','cell1','cell2', 'cell3', 'cell4', 'cell5', 'cell6', 'cell7', \
'cell8','cell9','cell10', 'cell11', 'cell12', 'cell13']
sep_conv_list_columns = ['cell0','cell1','cell2', 'cell3', 'cell5', 'cell6', 'cell7', \
'cell8', 'cell10', 'cell11', 'cell12', 'cell13']
if GLOBALS.CONFIG['network'] == 'DARTS':
initial_cell_list = GLOBALS.DARTS_cell_list_14
initial_sep_conv_list = GLOBALS.DARTS_sep_conv_list_14
else:
initial_cell_list = GLOBALS.DARTSPlus_cell_list_14
initial_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_14
#Config for 20 cells
elif GLOBALS.CONFIG["num_cells"] == 20:
cell_list_columns = ['STEM','cell0','cell1','cell2', 'cell3', 'cell4', 'cell5', 'cell6', 'cell7', \
'cell8','cell9','cell10', 'cell11', 'cell12', 'cell13', 'cell14', 'cell15', \
'cell16','cell17','cell18', 'cell19']
sep_conv_list_columns = ['cell0','cell1','cell2', 'cell3', 'cell4', 'cell5', 'cell7', \
'cell8','cell9','cell10', 'cell11', 'cell12', 'cell14', 'cell15', \
'cell16','cell17','cell18', 'cell19']
if GLOBALS.CONFIG['network'] == 'DARTS':
initial_cell_list = GLOBALS.DARTS_cell_list_20
initial_sep_conv_list = GLOBALS.DARTS_sep_conv_list_20
else:
initial_cell_list = GLOBALS.DARTSPlus_cell_list_20
initial_sep_conv_list = GLOBALS.DARTSPlus_sep_conv_list_20
cell_list_data = pd.DataFrame(columns=cell_list_columns)
sep_conv_list_data = pd.DataFrame(columns=sep_conv_list_columns)
#Final ranks used to calculate [C0, C1, C2, C3] Sizes for all 20 cells
cell_rank_data = | pd.DataFrame(columns=cell_list_columns) | pandas.DataFrame |
from pymongo import MongoClient
import json
import requests, zipfile, io, os, re
import pandas as pd
import geopandas, astral
import time
from astral.sun import sun
METEO_FOLDER = r"C:/Users/48604/Documents/semestr5/PAG/pag2/Meteo/"
ZAPIS_ZIP = METEO_FOLDER + r"Meteo_"
url = "https://dane.imgw.pl/datastore/getfiledown/Arch/Telemetria/Meteo/2015/Meteo_2015-07.zip"
connection = MongoClient("localhost", 27017)
db = connection.local
collection = db
def get_data(url, pth):
file = requests.get(url)
zip = zipfile.ZipFile(io.BytesIO(file.content))
#download zip from IMGW archive
url_end = url[-4:]
#later checking if file ends with .zip or .ZIP
pattern = "Meteo_(.*?)" + url_end
substring = re.search(pattern, url).group(1)
#pattern matching in order to name new dir properly
path = pth + substring + "/"
#path to dir with data from specified period
if os.path.isdir(path) == 0:
os.mkdir(path)
zip.extractall(path)
#creating dir if it doesnt exist and unpacking data
return path
path_data = get_data(url, ZAPIS_ZIP)
path_parametry = METEO_FOLDER + "kody_parametr.csv"
path_effacility = METEO_FOLDER + "effacility.geojson"
path_powiaty = METEO_FOLDER + "powiaty/powiaty.shp"
path_wojewodztwa = METEO_FOLDER + "woj/woj.shp"
def read_parametry(path_parametr):
parametr = pd.read_csv(path_parametr, sep=';', index_col=False, encoding='cp1250')
#separator=';' - by default ','
#index_col=False - store all data as columns not indexes
return parametr
#function to read parameters from the path_parametr file
def read_data(path_data):
fields = ["KodSH", "ParametrSH", "Date", "Wartosc"]
data = {}
#column names; empty dictionary for data from separate csv files in folder
for filename in os.listdir(path_data):
#for every file in folder
dataset_name = pd.read_csv(path_data + filename, sep=';', header=None, names=fields, index_col=False, low_memory=False, dtype={'KodSH': int, 'Wartosc': str}, parse_dates=['Date'])
#applying value
#separator=';' - by default ','
#no header by default
#names=fields - column names
#index_col=False - store all data as columns not indexes
#low_memory=false - way to get rid of different datatypes in columns warning
dataset_name["Wartosc"] = dataset_name["Wartosc"].str.replace(',','.').astype('float64')
#replace ',' with '.' and convert string to float
dataset_name["Date"] = dataset_name["Date"].dt.tz_localize("Europe/Warsaw")
#setting "Data" column to datetime64[ns, Europe/Warsaw] from datetime64[ns]
data[filename] = dataset_name
return data
#function to read data from the path_data file
def read_effacility(path_effacility):
path = open(path_effacility)
effacility = geopandas.read_file(path)
#read geojson
effacility["geometry"] = effacility["geometry"].to_crs(epsg=4258)
x = effacility["geometry"].x
y = effacility["geometry"].y
data = {"KodSH" : effacility["name"], "City" : effacility["name1"], "Lon" : x, "Lat" : y}
effacility = pd.DataFrame(data)
effacility["KodSH"] = effacility["KodSH"].astype('float64')
#store KodSH as number not string
return effacility
def f_init_mean(data):
init_mean = {}
for key in data:
init_mean[key] = data[key].groupby(["KodSH", data[key]["Date"].dt.date])["Wartosc"].mean()
init_mean[key] = init_mean[key].to_frame()
init_mean[key].drop(columns = ["Wartosc"], inplace=True)
return init_mean
def f_sun_info(init_mean, effacility):
sun_info = {}
for key in init_mean:
init_mean[key] = init_mean[key].reset_index("Date")
#Date as a non index value
#init_mean[key] = init_mean[key].drop(["24h"], axis=1)
sun_info[key] = pd.merge(init_mean[key], effacility, on = "KodSH", how = "left")
astral_info = {}
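    # Compute dawn and dusk times for every station/date pair with astral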
for key in sun_info:
shp = sun_info[key].shape[0]
Dawn = list(range(shp))
Dusk = list(range(shp))
for k in sun_info[key].index:
City = astral.LocationInfo(sun_info[key]["City"][k],"Poland", "Europe/Warsaw", sun_info[key]["Lat"][k], sun_info[key]["Lon"][k])
Dawn[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dawn"]
Dusk[k] = (sun(City.observer, date=sun_info[key]["Date"][k], tzinfo=City.timezone))["dusk"]
data = {"KodSH" : sun_info[key]["KodSH"], "Dawn" : Dawn ,"Dusk" : Dusk}
astral_info[key] = pd.DataFrame(data)
sun_info[key] = | pd.merge(sun_info[key], astral_info[key], left_index=True, right_index=True) | pandas.merge |
import numpy as np
import pandas as pd
import warnings
from ecomplexity.calc_proximity import calc_discrete_proximity
from ecomplexity.calc_proximity import calc_continuous_proximity
from ecomplexity.ComplexityData import ComplexityData
from ecomplexity.density import calc_density
from ecomplexity.coicog import calc_coi_cog
def reshape_output_to_data(cdata, t):
"""Reshape output ndarrays to df"""
diversity = (
cdata.diversity_t[:, np.newaxis].repeat(cdata.mcp_t.shape[1], axis=1).ravel()
)
ubiquity = (
cdata.ubiquity_t[np.newaxis, :].repeat(cdata.mcp_t.shape[0], axis=0).ravel()
)
eci = cdata.eci_t[:, np.newaxis].repeat(cdata.mcp_t.shape[1], axis=1).ravel()
pci = cdata.pci_t[np.newaxis, :].repeat(cdata.mcp_t.shape[0], axis=0).ravel()
coi = cdata.coi_t[:, np.newaxis].repeat(cdata.mcp_t.shape[1], axis=1).ravel()
out_dict = {
"diversity": diversity,
"ubiquity": ubiquity,
"mcp": cdata.mcp_t.ravel(),
"eci": eci,
"pci": pci,
"density": cdata.density_t.ravel(),
"coi": coi,
"cog": cdata.cog_t.ravel(),
}
if hasattr(cdata, "rpop_t"):
out_dict["rca"] = cdata.rca_t.ravel()
out_dict["rpop"] = cdata.rpop_t.ravel()
elif hasattr(cdata, "rca_t"):
out_dict["rca"] = cdata.rca_t.ravel()
output = pd.DataFrame.from_dict(out_dict).reset_index(drop=True)
cdata.data_t["time"] = t
cdata.output_t = pd.concat([cdata.data_t.reset_index(), output], axis=1)
cdata.output_list.append(cdata.output_t)
return cdata
def conform_to_original_data(cdata, data):
"""Reset column names and add dropped columns back"""
cdata.output = cdata.output.rename(columns=cdata.cols_input)
cdata.output = cdata.output.merge(
data, how="outer", on=list(cdata.cols_input.values())
)
return cdata
def calc_eci_pci(cdata):
# Check if diversity or ubiquity is 0 or nan, can cause problems
if ((cdata.diversity_t == 0).sum() > 0) | ((cdata.ubiquity_t == 0).sum() > 0):
warnings.warn(
f"In year {cdata.t}, diversity / ubiquity is 0 for some locs/prods"
)
# Extract valid elements only
cntry_mask = np.argwhere(cdata.diversity_t == 0).squeeze()
prod_mask = np.argwhere(cdata.ubiquity_t == 0).squeeze()
diversity_valid = cdata.diversity_t[cdata.diversity_t != 0]
ubiquity_valid = cdata.ubiquity_t[cdata.ubiquity_t != 0]
mcp_valid = cdata.mcp_t[cdata.diversity_t != 0, :][:, cdata.ubiquity_t != 0]
# Calculate ECI and PCI eigenvectors
mcp1 = mcp_valid / diversity_valid[:, np.newaxis]
mcp2 = mcp_valid / ubiquity_valid[np.newaxis, :]
# Make copy of transpose to ensure contiguous array for performance reasons
mcp2_t = mcp2.T.copy()
# These matrix multiplication lines are very slow
Mcc = mcp1 @ mcp2_t
Mpp = mcp2_t @ mcp1
try:
# Calculate eigenvectors
eigvals, eigvecs = np.linalg.eig(Mpp)
eigvecs = np.real(eigvecs)
# Get eigenvector corresponding to second largest eigenvalue
eig_index = eigvals.argsort()[-2]
kp = eigvecs[:, eig_index]
kc = mcp1 @ kp
# Adjust sign of ECI and PCI so it makes sense, as per book
s1 = np.sign(np.corrcoef(diversity_valid, kc)[0, 1])
eci_t = s1 * kc
pci_t = s1 * kp
# Add back the deleted elements
for x in cntry_mask:
eci_t = np.insert(eci_t, x, np.nan)
for x in prod_mask:
pci_t = np.insert(pci_t, x, np.nan)
except Exception as e:
warnings.warn(f"Unable to calculate eigenvectors for year {cdata.t}")
print(e)
eci_t = np.empty(cdata.mcp_t.shape[0])
pci_t = np.empty(cdata.mcp_t.shape[1])
eci_t[:] = np.nan
pci_t[:] = np.nan
return (eci_t, pci_t)
def ecomplexity(
data,
cols_input,
presence_test="rca",
val_errors_flag="coerce",
rca_mcp_threshold=1,
rpop_mcp_threshold=1,
pop=None,
continuous=False,
asymmetric=False,
verbose=True,
):
"""Complexity calculations through the ComplexityData class
Args:
data: pandas dataframe containing production / trade data.
Including variables indicating time, location, product and value
cols_input: dict of column names for time, location, product and value.
Example: {'time':'year', 'loc':'origin', 'prod':'hs92', 'val':'export_val'}
presence_test: str for test used for presence of industry in location.
One of "rca" (default), "rpop", "both", or "manual".
Determines which values are used for M_cp calculations.
If "manual", M_cp is taken as given from the "value" column in data
val_errors_flag: {'coerce','ignore','raise'}. Passed to pd.to_numeric
*default* coerce.
rca_mcp_threshold: numeric indicating RCA threshold beyond which mcp is 1.
*default* 1.
rpop_mcp_threshold: numeric indicating RPOP threshold beyond which mcp is 1.
*default* 1. Only used if presence_test is not "rca".
pop: pandas df, with time, location and corresponding population, in that order.
Not required if presence_test is "rca" (default).
continuous: Used to calculate product proximities, indicates whether
to consider correlation of every product pair (True) or product
co-occurrence (False). *default* False.
asymmetric: Used to calculate product proximities, indicates whether
to generate asymmetric proximity matrix (True) or symmetric (False).
*default* False.
verbose: Print year being processed
Returns:
Pandas dataframe containing the data with the following additional columns:
- diversity: k_c,0
- ubiquity: k_p,0
- rca: Balassa's RCA
- rpop: (available if presence_test!="rca") RPOP
- mcp: MCP used for complexity calculations
- eci: Economic complexity index
- pci: Product complexity index
- density: Density of the network around each product
- coi: Complexity Outlook Index
- cog: Complexity Outlook Gain
"""
cdata = ComplexityData(data, cols_input, val_errors_flag)
cdata.output_list = []
# Iterate over time stamps
for t in cdata.data.index.unique("time"):
if verbose:
print(t)
# Rectangularize df
cdata.create_full_df(t)
# Check if Mcp is pre-computed
if presence_test != "manual":
cdata.calculate_rca()
cdata.calculate_mcp(
rca_mcp_threshold, rpop_mcp_threshold, presence_test, pop, t
)
else:
cdata.calculate_manual_mcp()
# Calculate diversity and ubiquity
cdata.diversity_t = np.nansum(cdata.mcp_t, axis=1)
cdata.ubiquity_t = np.nansum(cdata.mcp_t, axis=0)
# Calculate ECI and PCI
cdata.eci_t, cdata.pci_t = calc_eci_pci(cdata)
# Calculate proximity and density
if continuous == False:
prox_mat = calc_discrete_proximity(
cdata.mcp_t, cdata.ubiquity_t, asymmetric
)
cdata.density_t = calc_density(cdata.mcp_t, prox_mat)
elif continuous == True and presence_test == "rpop":
prox_mat = calc_continuous_proximity(cdata.rpop_t, cdata.ubiquity_t)
cdata.density_t = calc_density(cdata.rpop_t, prox_mat)
elif continuous == True and presence_test != "rpop":
prox_mat = calc_continuous_proximity(cdata.rca_t, cdata.ubiquity_t)
cdata.density_t = calc_density(cdata.rca_t, prox_mat)
# Calculate COI and COG
cdata.coi_t, cdata.cog_t = calc_coi_cog(cdata, prox_mat)
# Normalize variables as per STATA package
cdata.pci_t = (cdata.pci_t - cdata.eci_t.mean()) / cdata.eci_t.std()
cdata.cog_t = cdata.cog_t / cdata.eci_t.std()
cdata.eci_t = (cdata.eci_t - cdata.eci_t.mean()) / cdata.eci_t.std()
cdata.coi_t = (cdata.coi_t - cdata.coi_t.mean()) / cdata.coi_t.std()
# Reshape ndarrays to df
cdata = reshape_output_to_data(cdata, t)
    cdata.output = pd.concat(cdata.output_list)
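    # The excerpt is cut off here; based on the conform_to_original_data()
    # helper defined above, the function presumably ends along these lines
    # (a sketch, not verbatim from the original source):
    cdata = conform_to_original_data(cdata, data)
    return cdata.output
# Illustrative call (a sketch only; the file name and column mapping are
# assumptions, not taken from this script):
# trade = pd.read_csv("trade_data.csv")
# cols = {"time": "year", "loc": "origin", "prod": "hs92", "val": "export_val"}
# cdf = ecomplexity(trade, cols)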
import Functions
import pandas as pd
import matplotlib.pyplot as plt
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum()
return dfSentiment
def cleaning(df):
# Importing Bot user names
bots = pd.read_csv(r'Data\Bots.csv', index_col=0, sep=';')
# Removing bots from the data
df = df[~df.author.isin(bots.bot_names)]
# Removing any NA's
    df = df.dropna()
    # Cleaning the text data - the bottom of the file is full of leftover attempts that tried each pattern separately before they were merged; delete it if you are not interested.
keeplist = "?.!,'_-"
import re
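    # The pattern below strips URLs/bare domain names, /u/ and /r/ Reddit
    # mentions, control and non-ASCII bytes, digits, HTML entity fragments
    # starting with &g/&l (&gt;/&lt;), and any character that is not
    # whitespace, alphanumeric, or in `keeplist`.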
Adj_comment = pd.DataFrame(
[re.sub(r'[\S]+\.(net|com|org|info|edu|gov|uk|de|ca|jp|fr|au|us|ru|ch|it|nel|se|no|es|mil)'
r'[\S]*\s?|(/u/|u/)\S+|(/r/|r/)\S+|[\x00-\x1f\x7f-\xff]|[0-9]+|(&g|&l)\S+'
r'|[^\s\w' + keeplist + ']', "", elem) for elem in df['body']], columns=['body'])
df['body'] = Adj_comment['body']
return df
period = ['2014', '2015_01', '2015_02', '2015_03', '2015_04', '2015_05', '2015_06', '2015_07', '2015_08', '2015_09',
'2015_10', '2015_11', '2015_12', '2016_01', '2016_02', '2016_03', '2016_04', '2016_05', '2016_06', '2016_07',
'2016_08', '2016_09', '2016_10',
'2016_11', '2016_12', '2017_01', '2017_02', '2017_03', '2017_04', '2017_05', '2017_06', '2017_07', '2017_08',
'2017_09',
'2017_10', '2017_11', '2017_12', '2018_01', '2018_02', '2018_03', '2018_04', '2018_05', '2018_06', '2018_07',
'2018_08',
'2018_09', '2018_10', '2018_11', '2018_12', '2019_01', '2019_02', '2019_03', '2019_04', '2019_05', '2019_06',
'2019_07',
'2019_08', '2019_09']
dfAllData = pd.DataFrame()
for sPeriod in period:
query = r"""
#standardSQL
SELECT author, subreddit, created_utc, score, controversiality, body
FROM `fh-bigquery.reddit_comments.{}`
WHERE REGEXP_CONTAINS(body, r'(?i)\b Dash\b')
""".format(sPeriod)
dfData = Functions.collect_big_query(sQuery=query)
print(sPeriod + ' Collected')
print(sPeriod + ' cleaned')
dfAllData = dfAllData.append(dfData)
del dfData
dfAllData.to_csv('Dash_sentiment.csv')
coin_list = ['BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS']
dfSubRed = pd.DataFrame()
for scoin in coin_list:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfSubRed = pd.concat([dfSubRed, pd.DataFrame(dfTemp.subreddit.value_counts()[:10].index),
pd.DataFrame(dfTemp.subreddit.value_counts()[:10].values)], axis=1)
# Removing disturbing subreddits:
# EOS:
EOS_list = ['ffxiv', 'photography', 'masseffect', 'whowouldwin', 'astrophotography', 'elementaryos']
dfTemp = pd.read_csv('EOS_sentiment.csv', index_col=0)
dfTemp = dfTemp[~dfTemp['subreddit'].isin(EOS_list)]
dfTemp.to_csv('EOS_R_Sentiment.csv')
# Ripple: indianapolis
XRP_list = ['indianapolis']
dfTemp = pd.read_csv('XRP_sentiment.csv', index_col=0) # 510558
dfTemp = dfTemp[~dfTemp['subreddit'].isin(XRP_list)]
dfTemp.to_csv('XRP_R_Sentiment.csv')
# BNB: SquaredCircle, dragonballfighterz, StreetFighter, step1, AirBnB
BNB_list = ['SquaredCircle', 'dragonballfighterz', 'StreetFighter', 'step1', 'AirBnB']
dfTemp = pd.read_csv('BNB_sentiment.csv', index_col=0)  # 109630
dfTemp = dfTemp[~dfTemp['subreddit'].isin(BNB_list)]
dfTemp.to_csv('BNB_R_Sentiment.csv')
# New coin list
coin_list_R = ['BCH', 'Cardona', 'dogecoin', 'EOS_R', 'ETH', 'LTC', 'XRP_R', 'Monero', 'BNB_R', 'IOTA', 'TEZOS']
# Removing NA's
for scoin in coin_list_R:
dfTemp = pd.read_csv(scoin + '_sentiment.csv', index_col=0)
dfTemp = dfTemp.dropna()
dfTemp.to_csv(scoin + 'NA_Sentiment.csv')
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
for scoin in coin_list_NA:
dfTemp = pd.read_csv(scoin + '_Sentiment.csv', index_col=0)
dfTemp = cleaning(dfTemp)
# dfAllData = Functions.language_filter(dfAllData, series='body', language_select='en')
dfTemp = dfTemp.reset_index(drop=True)
dfTemp = Functions.get_sentiment(dfTemp, series='body')
dfTemp = group_sentiment(dfTemp)
dfTemp.to_csv(scoin + '_Actual_Sentiment.csv')
# Run from here at startup to load the necessary functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
dfWMR = dfWMR.drop(columns=dfWMR.columns)
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
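    # Momentum features: trailing sums of daily returns over 3/5/7/14-day
    # windows, i.e. a simple cumulative-return proxy for price momentum.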
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
        dfSentiment.index = pd.DatetimeIndex(dfSentiment['Date'])
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
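# `assertEq` is referenced by the apply/offset test cases in this module but
# its definition is not part of this excerpt; a minimal helper consistent
# with that usage (apply the offset to the base date and compare) would be:
def assertEq(dateOffset, baseDate, expected):
    actual = dateOffset + baseDate
    assert actual == expected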
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1),  # n=-2, weekday=1 (Tue) -> roll back two weeks
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
        tests = [(MonthEnd(), datetime(2007, 12, 31), True),
                 (MonthEnd(), datetime(2008, 1, 1), False)]
        for offset, date, expected in tests:
            assertOnOffset(offset, date, expected)
import os; os.environ['OMP_NUM_THREADS'] = '3'
from sklearn.ensemble import ExtraTreesRegressor
import nltk
nltk.data.path.append("/media/sayantan/Personal/nltk_data")
from nltk.stem.snowball import RussianStemmer
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer as Tfidf
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import KFold
from sklearn.externals import joblib
from scipy.sparse import hstack, csr_matrix
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
import pandas as pd
import numpy as np
import tensorflow as tf
import gc
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder, MinMaxScaler
from sklearn import model_selection
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import Ridge, Lasso, HuberRegressor, ElasticNet, BayesianRidge, LinearRegression
train_x1 = pd.read_feather('../train_imagetop_targetenc.pkl')
from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(["20180101", "20180102"])
# getter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex:
def setup_method(self, method):
        self.rng = bdate_range(START, END, freq="C")
# -*- coding: utf-8 -*-
"""Interface for flopy's implementation for MODFLOW."""
__all__ = ["MfSfrNetwork"]
import pickle
from itertools import combinations, zip_longest
from textwrap import dedent
import geopandas
import numpy as np
import pandas as pd
from shapely import wkt
from shapely.geometry import LineString, Point, Polygon, box
from shapely.ops import linemerge
from swn.core import SurfaceWaterNetwork
from swn.spatial import compare_crs, get_sindex
from swn.util import abbr_str
try:
import matplotlib
except ImportError:
matplotlib = False
class MfSfrNetwork:
"""MODFLOW SFR network class.
Attributes
----------
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model
segments : geopandas.GeoDataFrame
Copied from swn.segments, but with additional columns added
segment_data : pandas.DataFrame
Similar to structure in model.sfr.segment_data, but for one stress
period. Transient data (where applicable) will show summary statistics.
The index is 'nseg', ordered and starting from 1. An additional column
'segnum' is used to identify segments, and if defined,
abstraction/diversion identifiers, where iupseg != 0.
reaches : geopandas.GeoDataFrame
Similar to structure in model.sfr.reach_data with index 'reachID',
ordered and starting from 1. Contains geometry and other columns
not used by flopy. Use get_reach_data() for use with flopy.
diversions : geopandas.GeoDataFrame, pd.DataFrame or None
Copied from swn.diversions, if set/defined.
logger : logging.Logger
Logger to show messages.
"""
def __init__(self, logger=None):
"""Initialise MfSfrNetwork.
Parameters
----------
logger : logging.Logger, optional
Logger to show messages.
"""
from swn.logger import get_logger, logging
from importlib.util import find_spec
if logger is None:
self.logger = get_logger(self.__class__.__name__)
elif isinstance(logger, logging.Logger):
self.logger = logger
else:
raise ValueError(
"expected 'logger' to be Logger; found " + str(type(logger)))
self.logger.warning(
"using legacy MfSfrNetwork; consider using SwnModflow")
self.logger.info('creating new %s object', self.__class__.__name__)
if not find_spec('flopy'):
raise ImportError(self.__class__.__name__ + ' requires flopy')
self.segments = None
self.segment_data = None
self.reaches = None
self.diversions = None
# all other properties added afterwards
@classmethod
def from_swn_flopy(
cls, swn, model, ibound_action='freeze',
reach_include_fraction=0.2, min_slope=1./1000,
hyd_cond1=1., hyd_cond_out=None, thickness1=1., thickness_out=None,
width1=10., width_out=None, roughch=0.024,
abstraction={}, inflow={}, flow={}, runoff={}, etsw={}, pptsw={}):
"""Create a MODFLOW SFR structure from a surface water network.
Parameters
----------
swn : swn.SurfaceWaterNetwork
Instance of a SurfaceWaterNetwork.
model : flopy.modflow.mf.Modflow
Instance of a flopy MODFLOW model with DIS and BAS6 packages.
ibound_action : str, optional
Action to handle IBOUND:
- ``freeze`` : Freeze IBOUND, but clip streams to fit bounds.
- ``modify`` : Modify IBOUND to fit streams, where possible.
reach_include_fraction : float or pandas.Series, optional
Fraction of cell size used as a threshold distance to determine if
reaches outside the active grid should be included to a cell.
Based on the furthest distance of the line and cell geometries.
Default 0.2 (e.g. for a 100 m grid cell, this is 20 m).
min_slope : float or pandas.Series, optional
Minimum downwards slope imposed on segments. If float, then this is
a global value, otherwise it is per-segment with a Series.
Default 1./1000 (or 0.001).
hyd_cond1 : float or pandas.Series, optional
Hydraulic conductivity of the streambed, as a global or per top of
each segment. Used for either STRHC1 or HCOND1/HCOND2 outputs.
Default 1.
hyd_cond_out : None, float or pandas.Series, optional
Similar to thickness1, but for the hydraulic conductivity of each
segment outlet. If None (default), the same hyd_cond1 value for the
top of the outlet segment is used for the bottom.
thickness1 : float or pandas.Series, optional
Thickness of the streambed, as a global or per top of each segment.
Used for either STRTHICK or THICKM1/THICKM2 outputs. Default 1.
thickness_out : None, float or pandas.Series, optional
Similar to thickness1, but for the bottom of each segment outlet.
If None (default), the same thickness1 value for the top of the
outlet segment is used for the bottom.
width1 : float or pandas.Series, optional
Channel width, as a global or per top of each segment. Used for
WIDTH1/WIDTH2 outputs. Default 10.
width_out : None, float or pandas.Series, optional
Similar to width1, but for the bottom of each segment outlet.
If None (default), the same width1 value for the top of the
outlet segment is used for the bottom.
roughch : float or pandas.Series, optional
Manning's roughness coefficient for the channel. If float, then
this is a global value, otherwise it is per-segment with a Series.
Default 0.024.
abstraction : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no abstraction from diversions).
inflow : dict or pandas.DataFrame, optional
See generate_segment_data for details.
Default is {} (no outside inflow added to flow term).
flow : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
runoff : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
etsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
pptsw : dict or pandas.DataFrame, optional
See generate_segment_data. Default is {} (zero).
logger : logging.Logger, optional
Logger to show messages.
"""
obj = cls()
import flopy
if not isinstance(swn, SurfaceWaterNetwork):
raise ValueError('swn must be a SurfaceWaterNetwork object')
elif ibound_action not in ('freeze', 'modify'):
raise ValueError('ibound_action must be one of freeze or modify')
obj.model = model
obj.segments = swn.segments.copy()
# Make sure model CRS and segments CRS are the same (if defined)
crs = None
segments_crs = getattr(obj.segments.geometry, 'crs', None)
modelgrid_crs = None
modelgrid = obj.model.modelgrid
epsg = modelgrid.epsg
proj4_str = modelgrid.proj4
if epsg is not None:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs, epsg)
else:
segments_crs, modelgrid_crs, same = compare_crs(segments_crs,
proj4_str)
if (segments_crs is not None and modelgrid_crs is not None and
not same):
obj.logger.warning(
'CRS for segments and modelgrid are different: {0} vs. {1}'
.format(segments_crs, modelgrid_crs))
crs = segments_crs or modelgrid_crs
# Make sure their extents overlap
minx, maxx, miny, maxy = modelgrid.extent
model_bbox = box(minx, miny, maxx, maxy)
rstats = obj.segments.bounds.describe()
segments_bbox = box(
rstats.loc['min', 'minx'], rstats.loc['min', 'miny'],
rstats.loc['max', 'maxx'], rstats.loc['max', 'maxy'])
if model_bbox.disjoint(segments_bbox):
raise ValueError('modelgrid extent does not cover segments extent')
# More careful check of overlap of lines with grid polygons
obj.logger.debug('building model grid cell geometries')
dis = obj.model.dis
cols, rows = np.meshgrid(np.arange(dis.ncol), np.arange(dis.nrow))
ibound = obj.model.bas6.ibound[0].array.copy()
ibound_modified = 0
grid_df = pd.DataFrame({'row': rows.flatten(), 'col': cols.flatten()})
grid_df.set_index(['row', 'col'], inplace=True)
grid_df['ibound'] = ibound.flatten()
if ibound_action == 'freeze' and (ibound == 0).any():
# Remove any inactive grid cells from analysis
grid_df = grid_df.loc[grid_df['ibound'] != 0]
# Determine grid cell size
col_size = np.median(dis.delr.array)
if dis.delr.array.min() != dis.delr.array.max():
obj.logger.warning(
'assuming constant column spacing %s', col_size)
row_size = np.median(dis.delc.array)
if dis.delc.array.min() != dis.delc.array.max():
obj.logger.warning(
'assuming constant row spacing %s', row_size)
cell_size = (row_size + col_size) / 2.0
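        # cell_size (mean of the median column and row spacings) is used
        # below to turn reach_include_fraction into an absolute distance
        # threshold for including near-boundary reaches.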
# Note: modelgrid.get_cell_vertices(row, col) is slow!
xv = modelgrid.xvertices
yv = modelgrid.yvertices
r, c = [np.array(s[1])
for s in grid_df.reset_index()[['row', 'col']].iteritems()]
cell_verts = zip(
zip(xv[r, c], yv[r, c]),
zip(xv[r, c + 1], yv[r, c + 1]),
zip(xv[r + 1, c + 1], yv[r + 1, c + 1]),
zip(xv[r + 1, c], yv[r + 1, c])
)
obj.grid_cells = grid_cells = geopandas.GeoDataFrame(
grid_df, geometry=[Polygon(r) for r in cell_verts], crs=crs)
obj.logger.debug('evaluating reach data on model grid')
grid_sindex = get_sindex(grid_cells)
reach_include = swn.segments_series(reach_include_fraction) * cell_size
# Make an empty DataFrame for reaches
obj.reaches = pd.DataFrame(columns=['geometry'])
obj.reaches.insert(1, column='row', value=pd.Series(dtype=int))
obj.reaches.insert(2, column='col', value=pd.Series(dtype=int))
empty_reach_df = obj.reaches.copy() # take this before more added
obj.reaches.insert(
1, column='segnum',
value=pd.Series(dtype=obj.segments.index.dtype))
obj.reaches.insert(2, column='dist', value=pd.Series(dtype=float))
empty_reach_df.insert(3, column='length', value=pd.Series(dtype=float))
        empty_reach_df.insert(4, column='moved', value=pd.Series(dtype=bool))
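# Illustrative use of the class above (a sketch only; `lines` and the MODFLOW
# name file are assumptions, not taken from this module):
# import flopy
# import swn
# m = flopy.modflow.Modflow.load("model.nam")
# n = swn.SurfaceWaterNetwork.from_lines(lines.geometry)
# nm = MfSfrNetwork.from_swn_flopy(n, m, hyd_cond1=2.0, width1=10.0)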
# -*- coding: utf-8 -*-
"""One line description.
Authors:
<NAME> - <EMAIL>
Todo:
* Docstring
* Put all hyper to arguments
"""
import logging
import os
import time
from pathlib import Path
import click
import numpy as np
import pandas as pd
import wandb
from sklearn.model_selection import train_test_split
from food_ke.entailment.augment import augment_df
from food_ke.entailment.custom_typing import PathLike
from food_ke.entailment.dataset import EntailmentDataset
from food_ke.entailment.rank import rank_data_unlabelled
from food_ke.entailment.train import load_data, load_model, train
from food_ke.retraining.evaluate import evaluate
from food_ke.retraining.export_annotations import export_annotations
from food_ke.retraining.preprocess import preprocess
logging.basicConfig(
format="[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s",
level=logging.DEBUG,
)
def map_label_studio_id_to_data_litsense_id(
data_litsense, premise, hypothesis
):
row = data_litsense.loc[
(data_litsense["premise"] == premise)
& (data_litsense["hypothesis"] == hypothesis)
]
if len(row) > 1:
print(row)
raise Exception(
"Premise and hypothesis pair is not a unique identifier for a "
"task. Need to implement an id for litsense."
)
if len(row) < 1:
raise Exception(
"There is data from LS that is not in the LitSense dataset."
)
return row.index[0]
def exclude_labelled_annotations(
data_preprocessed_premises, data_preprocessed_hypotheses, data_litsense
):
""" """
idx_exclusion = []
for premise, hypothesis in zip(
data_preprocessed_premises, data_preprocessed_hypotheses
):
idx_exclusion.append(
map_label_studio_id_to_data_litsense_id(
data_litsense, premise, hypothesis
)
)
data_litsense_unlabelled = data_litsense.drop(idx_exclusion)
logging.info(
f"{len(idx_exclusion)} labelled samples removed LitSense dataset."
)
return data_litsense_unlabelled
def wandb_setup(
model_name,
adapter_name,
epochs,
learning_rate,
batch_size,
augmentation_strategies,
num_samples_unaugmented,
validate_every_steps,
validate_every_examples,
):
""" """
wandb.init(
project="food_ke_entailment",
entity="food_ke",
config={
"model_name": model_name,
"adapter_name": adapter_name,
"epochs": epochs,
"learning_rate": learning_rate,
"batch_size": batch_size,
"augmentation_strategies": augmentation_strategies,
"num_samples_unaugmented": num_samples_unaugmented,
"validate_every_steps": validate_every_steps,
"validate_every_examples": validate_every_examples,
},
)
def sort_by_rank_scores(data_ranked: pd.DataFrame) -> pd.DataFrame:
"""TODO: Move to rank.py"""
confidence_scores = data_ranked["hypothesis_confidence"]
confidence_scores_sorted = confidence_scores.sort_values(
ascending=False
).to_frame()
confidence_scores_sorted["rank"] = list(range(len(confidence_scores)))
data_ranked[
"hypothesis_confidence_rank"
] = confidence_scores_sorted.sort_index()["rank"]
uncertainty_scores = data_ranked["hypothesis_uncertainty"]
uncertainty_scores_sorted = uncertainty_scores.sort_values(
ascending=False
).to_frame()
uncertainty_scores_sorted["rank"] = list(range(len(uncertainty_scores)))
data_ranked[
"hypothesis_uncertainty_rank"
] = uncertainty_scores_sorted.sort_index()["rank"]
ranks_overall = []
picked_metrics = np.random.choice(
["hypothesis_confidence_rank", "hypothesis_uncertainty_rank"],
size=len(data_ranked),
)
for i, picked_metric in enumerate(picked_metrics):
ranks_overall.append(data_ranked.iloc[i][picked_metric])
data_ranked["hypothesis_total_rank"] = ranks_overall
data_ranked["hypothesis_total_rank_calculated_by"] = picked_metrics
data_ranked_sorted = data_ranked.sort_values(
by=["hypothesis_novelty", "hypothesis_total_rank"],
ascending=[False, True],
)
return data_ranked_sorted
def _make_output_directories(
path_output: PathLike,
path_output_dir: PathLike,
path_evaluation_dir: PathLike,
):
# Initialize repo.
logging.info(f"Generating retraining directory {path_output}")
if not os.path.exists(path_output_dir):
os.mkdir(path_output_dir)
os.mkdir(path_output)
os.mkdir(path_evaluation_dir)
def retrain(
path_model_checkpoint: PathLike,
path_adapter_checkpoint: PathLike,
data: pd.DataFrame,
class_distribution: dict,
epochs=5,
learning_rate=2e-5,
batch_size=12,
validate_every_steps=10,
validate_every_examples=None,
):
model, tokenizer, optimizer = load_model(
model_name="roberta-base",
tokenizer_name="roberta-base",
adapter_name="AdapterHub/roberta-base-pf-scitail",
optimizer_kwargs={"lr": learning_rate, "correct_bias": True},
)
idx_train, idx_val = train_test_split(
data["orig_idx"].unique(), test_size=0.2, shuffle=True, random_state=42
)
# Validation should keep all classes having the same number.
data_val = data[data["orig_idx"].isin(idx_val)].copy()
data_val["row_id"] = data_val.index
data_val_rus = load_data(
data_val,
class_distribution={
"entailment": 0.5,
"neutral": 0.5,
"contradiction": 0.0,
},
)
data_train = data[data["orig_idx"].isin(idx_train)]
data_train_augmented = augment_df(
data_train, augs_per_food=5, augs_per_chemical=5
).copy()
data_train_augmented["row_id"] = data_train_augmented.index
data_train_augmented_rus = load_data(
data_train_augmented,
class_distribution=class_distribution,
)
dataset = EntailmentDataset(
train_df=data_train_augmented_rus,
val_df=data_val_rus,
tokenizer=tokenizer,
)
train_loader, val_loader = dataset.get_data_loaders(batch_size=batch_size)
wandb_setup(
model_name="roberta-base",
adapter_name="AdapterHub/roberta-base-pf-scitail",
epochs=epochs,
learning_rate=learning_rate,
batch_size=batch_size,
augmentation_strategies="all",
num_samples_unaugmented=len(idx_train),
validate_every_steps=validate_every_steps,
validate_every_examples=validate_every_examples,
)
train(
model,
train_loader,
val_loader,
optimizer,
epochs=epochs,
flatten_neutral_contradicts=True,
checkpoint_dir=path_model_checkpoint,
adapter_dir=path_adapter_checkpoint,
adapter_name="AdapterHub/roberta-base-pf-scitail",
adapter_checkpoint_name="scitail",
early_stopping=True,
validate_every_steps=10,
validate_every_examples=None,
patience=10,
stopping_threshold=1e-3,
prediction_file=None,
)
return (
data_train,
data_train_augmented,
data_train_augmented_rus,
data_val,
data_val_rus,
)
@click.command()
@click.option("--label-studio-project-id", type=int)
@click.option(
"--path-output-dir",
type=str,
default=os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"..",
"..",
"data",
"retraining_data",
),
)
@click.option(
"--path-data-all",
type=str,
default=os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"..",
"..",
"data",
"entailment_data",
"data_litsense_no_dup.csv",
),
)
@click.option(
"--path-data-test",
type=str,
default=os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"..",
"..",
"data",
"entailment_data",
"entailment_test.csv",
),
)
@click.option("--path-previous-session-dir", type=str)
@click.option("--ratio-entailment", type=float, default=0.5)
@click.option("--ratio-neutral", type=float, default=0.5)
@click.option("--ratio-contradiction", type=float, default=0.0)
@click.option("--random-state", type=int, default=42)
@click.option("--rank-confidence", type=bool, default=True)
@click.option(
"--rank-confidence-agg", type=click.Choice(["mean", "sum"]), default="sum"
)
@click.option("--rank-novelty", type=bool, default=True)
@click.option(
"--rank-novelty-agg", type=click.Choice(["mean", "sum"]), default="sum"
)
@click.option("--rank-completeness", type=bool, default=False)
@click.option(
"--rank-completeness-agg",
type=click.Choice(["mean", "sum"]),
default="sum",
)
def main(
label_studio_project_id: int,
path_output_dir: PathLike,
path_data_all: PathLike,
path_data_test: PathLike,
path_previous_session_dir: PathLike,
ratio_entailment: float,
ratio_neutral: float,
ratio_contradiction: float,
random_state: int,
rank_confidence: bool,
rank_confidence_agg: str,
rank_novelty: bool,
rank_novelty_agg: str,
rank_completeness: bool,
rank_completeness_agg: str,
):
# Check data class distribution ratios.
if ratio_entailment < 0 or ratio_neutral < 0 or ratio_contradiction < 0:
raise ValueError("Ratios must be non-negative.")
if ratio_entailment + ratio_neutral + ratio_contradiction != 1.0:
raise ValueError("Ratios must sum to 1.")
class_distribution = {
"entailment": ratio_entailment,
"neutral": ratio_neutral,
"contradiction": ratio_contradiction,
}
# Initialize random state.
np.random.seed(random_state)
# Initialize retraining constants.
time_str = time.strftime("%Y%m%d_%H%M%S")
path_output = Path(path_output_dir) / time_str
# Initialize path names for evaulation data.
path_evaluation_dir = path_output / "evaluation"
path_evaluation_train_with_pred = (
path_evaluation_dir / "train_aug_rus_with_pred.csv"
)
path_evaluation_val_with_pred = (
path_evaluation_dir / "val_rus_with_pred.csv"
)
path_evaluation_test_with_pred = path_evaluation_dir / "test_with_pred.csv"
path_evaluation_train_result = (
path_evaluation_dir / "result_train_aug_rus.csv"
)
path_evaluation_val_result = path_evaluation_dir / "result_val_rus.csv"
path_evaluation_test_result = path_evaluation_dir / "result_test.csv"
# Initialize path names for intermediate data for training the entailment
# model.
path_data_export = path_output / "export.csv"
path_data_preprocessed = path_output / "preprocessed.csv"
path_data_train = path_output / "train.csv"
path_data_train_augmented = path_output / "train_aug.csv"
path_data_train_augmented_rus = path_output / "train_aug_rus.csv"
path_data_val = path_output / "val.csv"
path_data_val_rus = path_output / "val_rus.csv"
path_data_unlabelled = path_output / "data_unlabelled.csv"
path_data_unlabelled_ranked = path_output / "data_unlabelled_ranked.csv"
path_data_unlabelled_sorted = path_output / "data_unlabelled_sorted.csv"
# Initialize path names for model checkpoints.
path_model_checkpoint = path_output / "model_checkpoint"
path_adapter_checkpoint = path_output / "adapter_checkpoint"
_make_output_directories(path_output, path_output_dir, path_evaluation_dir)
# Store exported data from the LabelStudio.
data_raw = export_annotations(
project_id=label_studio_project_id, export_type="CSV"
)
if path_previous_session_dir is not None:
data_raw_previous = pd.read_csv(
os.path.join(path_previous_session_dir, "export.csv")
)
data_raw = pd.concat(
[data_raw, data_raw_previous], axis=0, ignore_index=True
)
data_raw.to_csv(path_data_export, index=False)
# Store preprocessed data.
data_preprocessed = preprocess(data_raw=data_raw, flatten_error=False)
data_preprocessed.to_csv(path_data_preprocessed, index=False)
# Store all intermediate data during retraining.
(
data_train,
data_train_augmented,
data_train_augmented_rus,
data_val,
data_val_rus,
) = retrain(
path_model_checkpoint,
path_adapter_checkpoint,
data_preprocessed,
class_distribution=class_distribution,
) # TODO: add hyperparameter input here
data_train.to_csv(path_data_train, index=False)
data_train_augmented.to_csv(path_data_train_augmented, index=False)
data_train_augmented_rus.to_csv(path_data_train_augmented_rus, index=False)
data_val.to_csv(path_data_val, index=False)
data_val_rus.to_csv(path_data_val_rus, index=False)
# Store ranking results of the next training cycle.
    data_litsense = pd.read_csv(path_data_all)
from sklearn.model_selection import train_test_split
import os
from functools import reduce
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def read_data(filepath):
preprocess_data(filepath)
    dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')
# ndp d2 app for smoooth rdm...
import streamlit as st
import pandas as pd
import numpy as np
from st_aggrid import AgGrid
import plotly.express as px
from apis import pno_data
from apis import mtk_rak_pno
from apis import pno_hist
# page setup
st.set_page_config(page_title="NDP App d2", layout="wide")
padding = 2
st.markdown(f""" <style>
.reportview-container .main .block-container{{
padding-top: {padding}rem;
padding-right: {padding}rem;
padding-left: {padding}rem;
padding-bottom: {padding}rem;
}} </style> """, unsafe_allow_html=True)
header = '<p style="font-family:sans-serif; color:grey; font-size: 12px;">\
NDP project app2 V0.94 "Carelian Beta"\
</p>'
st.markdown(header, unsafe_allow_html=True)
# plot size setup
#px.defaults.width = 600
px.defaults.height = 600
# page title
header_title = '''
**Naked Density Project**
'''
st.subheader(header_title)
header_text = '''
<p style="font-family:sans-serif; color:Dimgrey; font-size: 12px;">
Naked Density Projekti on <a href="https://research.aalto.fi/en/persons/teemu-jama" target="_blank">Te<NAME></a> väitöskirjatutkimus Aalto-yliopistossa.
Projektissa tutkitaan maankäytön tehokkuuden vaikutuksia kestävään kehitykseen data-analytiikan avulla.
</p>
'''
st.markdown(header_text, unsafe_allow_html=True)
st.markdown("""---""")
st.title("Data Paper #2")
st.markdown("Koko Suomi datana")
st.markdown("###")
kuntakoodit = pd.read_csv('config/kunta_dict.csv', index_col=False, header=0).astype(str)
kuntalista = kuntakoodit['kunta'].tolist()
default_ix = kuntalista.index('Espoo')
st.title(':point_down:')
# kuntavalitsin
valinta = st.selectbox('Valitse kunta ja taulukosta postinumeroalue', kuntalista, index=default_ix)
# hae pno data..
taulukkodata = pno_data(valinta)
# TABLE ..
from st_aggrid.grid_options_builder import GridOptionsBuilder
gb = GridOptionsBuilder.from_dataframe(taulukkodata)
gb.configure_selection(selection_mode="single", use_checkbox=True) # (selection_mode="multiple", use_checkbox=True)
gridOptions = gb.build()
from st_aggrid.shared import GridUpdateMode
data = AgGrid(taulukkodata,
gridOptions=gridOptions,
enable_enterprise_modules=False,
allow_unsafe_jscode=True,
update_mode=GridUpdateMode.SELECTION_CHANGED)
selected_row = data["selected_rows"]
pno_alue = | pd.DataFrame(selected_row) | pandas.DataFrame |
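# A possible continuation (assumption, not from the original app): guard
# against an empty AgGrid selection before the chosen postal-code row is used
# for further plots or API calls.
if pno_alue.empty:
    st.warning('Valitse postinumeroalue taulukosta.')
    st.stop()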
# -*- coding: utf-8 -*-
"""Benchmark the speed for generating new datasets by remixing old ones."""
import itertools as itt
import logging
import os
import time
from datetime import datetime
import click
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
from humanize import intword
from tqdm import tqdm
from pykeen.datasets import dataset_resolver, get_dataset
from pykeen.triples.splitting import split
from pykeen.utils import get_benchmark
from pykeen.version import get_git_hash
SPLITTING_DIRECTORY = get_benchmark('splitting')
RESULTS_DIRECTORY = SPLITTING_DIRECTORY / 'results'
os.makedirs(RESULTS_DIRECTORY, exist_ok=True)
tsv_path = SPLITTING_DIRECTORY / 'split_benchmark.tsv'
png_path = SPLITTING_DIRECTORY / 'split_benchmark.png'
scatter_png_path = SPLITTING_DIRECTORY / 'split_benchmark_scatter.png'
columns = [
'hash',
'dataset',
'dataset_size',
'dataset_load_time',
'dataset_cat_time',
'method',
'ratio',
'replicate',
'split_time',
'training_size',
'testing_size',
'validation_size',
]
def _log(s):
tqdm.write(f'[{datetime.now().strftime("%H:%M:%S")}] {s}')
@click.command()
@click.option('-r', '--replicates', type=int, default=5, show_default=True)
@click.option('-f', '--force', is_flag=True)
def main(replicates: int, force: bool):
import pykeen.triples.splitting
pykeen.triples.splitting.logger.setLevel(logging.ERROR)
import pykeen.triples.triples_factory
pykeen.triples.triples_factory.logger.setLevel(logging.ERROR)
import pykeen.utils
pykeen.utils.logger.setLevel(logging.ERROR)
git_hash = get_git_hash()
methods = ['cleanup', 'coverage']
ratios = [0.8]
click.echo(f'output directory: {SPLITTING_DIRECTORY.as_posix()}')
rows = []
outer_it = tqdm(sorted(dataset_resolver.lookup_dict), desc='Dataset')
for dataset in outer_it:
dataset_path = RESULTS_DIRECTORY / f'{dataset}.tsv'
if dataset_path.exists() and not force:
_log(f'loading pre-calculated {dataset} from {dataset_path}')
df = pd.read_csv(dataset_path, sep='\t')
rows.extend(df.values)
continue
_log(f'loading {dataset}')
t = time.time()
dataset = get_dataset(dataset=dataset)
dataset_name = dataset.__class__.__name__
ccl = [
dataset.training.mapped_triples,
dataset.testing.mapped_triples,
dataset.validation.mapped_triples,
]
load_time = time.time() - t
_log(f'done loading {dataset_name} after {load_time:.3f} seconds')
_log(f'concatenating {dataset_name}')
t = time.time()
mapped_triples: torch.LongTensor = torch.cat(ccl, dim=0)
concat_time = time.time() - t
_log(f'done concatenating {dataset_name} after {concat_time:.3f} seconds')
_log(f'deleting {dataset_name}')
del dataset
_log(f'done deleting {dataset_name}')
dataset_rows = []
inner_it = itt.product(methods, ratios, range(1, 1 + replicates))
inner_it = tqdm(
inner_it,
total=len(methods) * len(ratios) * replicates,
desc=f'{dataset_name} ({intword(mapped_triples.shape[0])})',
)
for method, ratio, replicate in inner_it:
t = time.time()
results = split(
mapped_triples,
ratios=[ratio, (1 - ratio) / 2],
method=method,
random_state=replicate,
)
split_time = time.time() - t
dataset_rows.append((
git_hash,
dataset_name,
mapped_triples.shape[0],
load_time,
concat_time,
method,
ratio,
replicate,
split_time,
results[0].shape[0],
results[1].shape[0],
results[2].shape[0],
))
del results
_log(f'writing to {dataset_path}')
| pd.DataFrame(dataset_rows, columns=columns) | pandas.DataFrame |
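# A hedged continuation sketch: persist the per-dataset results so that the
# "load pre-calculated" branch above can find them on the next run, then
# collect the rows for the combined report. Writing without the index is an
# assumption about the original file layout.
df_dataset = pd.DataFrame(dataset_rows, columns=columns)
df_dataset.to_csv(dataset_path, sep='\t', index=False)
rows.extend(dataset_rows)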
"""Console script for koapy."""
import os
import locale
import logging
import click
import koapy
from koapy.utils.logging import set_verbosity
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
client_check_timeout = 3
def fail_with_usage(message=None):
ctx = click.get_current_context()
if message is not None:
click.UsageError(message).show()
click.echo()
click.echo(ctx.get_help())
ctx.exit(1)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(koapy.__version__, '-V', '--version')
def cli():
pass
@cli.command(context_settings=CONTEXT_SETTINGS, short_help='Start grpc server with tray application.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, default=5, help='Verbosity.')
@click.option('--no-verbose', is_flag=True)
@click.argument('args', nargs=-1)
def serve(port, verbose, no_verbose, args):
"""
ARGS are passed to QApplication.
"""
app_args = []
if port:
app_args += ['--port', port]
if not no_verbose and verbose > 0:
app_args.append('-' + 'v' * verbose)
app_args += list(args)
from koapy.pyqt5.KiwoomOpenApiTrayApplication import KiwoomOpenApiTrayApplication
KiwoomOpenApiTrayApplication.main(app_args)
@cli.command(context_settings=CONTEXT_SETTINGS, short_help='Ensure logged in when server is up.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def login(port, verbose):
set_verbosity(verbose)
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout) as context:
state = context.GetConnectState()
if state == 0:
click.echo('Logging in...')
else:
click.echo('Already logged in.')
context.EnsureConnected()
gubun = context.GetServerGubun()
if gubun == '1':
click.echo('Logged into Simulation server.')
else:
click.echo('Logged into Real server.')
@cli.group(context_settings=CONTEXT_SETTINGS, short_help='Configure many things.')
def config():
pass
@config.command(context_settings=CONTEXT_SETTINGS, short_help='Configure auto login.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def autologin(port, verbose):
set_verbosity(verbose)
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout) as context:
context.EnsureConnected()
context.ShowAccountWindow()
@cli.group(context_settings=CONTEXT_SETTINGS, short_help='Update openapi metadata.')
def update():
pass
@update.command(context_settings=CONTEXT_SETTINGS, short_help='Update openapi TR metadata.')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def trdata(verbose):
set_verbosity(verbose)
from koapy.openapi.TrInfo import TrInfo
TrInfo.dump_trinfo_by_code()
@update.command(context_settings=CONTEXT_SETTINGS, short_help='Update openapi realtype metadata.')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def realdata(verbose):
set_verbosity(verbose)
from koapy.openapi.RealType import RealType
RealType.dump_realtype_by_desc()
@cli.group(context_settings=CONTEXT_SETTINGS, short_help='Get various types of data.')
def get():
pass
market_codes = [
'0',
'10',
'3',
'8',
'50',
'4',
'5',
'6',
'9',
'30',
'all',
]
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get stock codes.')
@click.option('-m', '--market', 'markets', metavar='MARKET', multiple=True, type=click.Choice(market_codes, case_sensitive=False), help='Stock market code to get. Can set multiple times.')
@click.option('-n', '--name', metavar='NAME', help='Name of stock.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
def stockcode(markets, name, port):
"""
\b
Possible market codes are:
0 : KOSPI main board (장내)
10 : KOSDAQ (코스닥)
3 : ELW
8 : ETF
50 : KONEX
4 : Mutual funds (뮤추얼펀드)
5 : Subscription warrants (신주인수권)
6 : REITs (리츠)
9 : High-yield funds (하이얼펀드)
30 : K-OTC
\b
Possible market code aliases are:
all: All possible market codes.
"""
if (markets, name) == (tuple(), None):
fail_with_usage()
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout) as context:
context.EnsureConnected()
if name is not None and not markets:
markets = ['0']
if 'all' in markets:
markets = market_codes
codes = set()
for market in markets:
codes = codes.union(set(context.GetCodeListByMarketAsList(market)))
codes = sorted(list(codes))
if not name:
for code in codes:
click.echo(code)
else:
names = [context.GetMasterCodeName(code) for code in codes]
codes_by_name = dict(zip(names, codes))
code = codes_by_name.get(name, None)
if code:
click.echo(code)
else:
click.echo('Cannot find code for given name.')
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get name for stock codes.')
@click.option('-c', '--code', 'codes', metavar='CODE', multiple=True, help='Stock code to get. Can set multiple times.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
def stockname(codes, port):
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout) as context:
context.EnsureConnected()
def get_codes():
if codes:
if '-' in codes:
with click.open_file('-', 'r') as f:
for code in f:
yield code.strip()
else:
for code in codes:
yield code
else:
while True:
try:
code = click.prompt('code', prompt_suffix=' >>> ')
code = code.strip()
if code == 'exit':
break
if code:
yield code
except EOFError:
break
for code in get_codes():
name = context.GetMasterCodeName(code)
click.echo(name)
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get basic information of stocks.')
@click.option('-c', '--code', 'codes', metavar='CODE', multiple=True, help='Stock code to get. Can set multiple times.')
@click.option('-m', '--market', 'markets', metavar='MARKET', multiple=True, type=click.Choice(market_codes, case_sensitive=False), help='Stock market code to get. Alternative to --code. Can set multiple times.')
@click.option('-i', '--input', metavar='FILENAME', type=click.Path(), help='Text or excel file containing codes. Alternative to --code or --market.')
@click.option('-o', '--output', metavar='FILENAME', type=click.Path(), help="Output filename. Optional for single code (prints to console).")
@click.option('-f', '--format', metavar='FORMAT', type=click.Choice(['md', 'xlsx', 'json'], case_sensitive=False))
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def stockinfo(codes, markets, input, output, format, port, verbose): # pylint: disable=redefined-builtin
"""
\b
Possible market codes are:
0 : KOSPI main board (장내)
10 : KOSDAQ (코스닥)
3 : ELW
8 : ETF
50 : KONEX
4 : Mutual funds (뮤추얼펀드)
5 : Subscription warrants (신주인수권)
6 : REITs (리츠)
9 : High-yield funds (하이얼펀드)
30 : K-OTC
\b
Possible market code aliases are:
all: All possible market codes.
"""
if (codes, markets, input, output) == (tuple(), tuple(), None, None):
fail_with_usage()
set_verbosity(verbose)
codes_from_input = False
codes_len = len(codes)
if codes_len == 0 and len(markets) == 0:
if input is None:
fail_with_usage('Cannot specify codes.')
if not os.path.exists(input):
fail_with_usage('Given input does not exist.')
codes_from_input = True
if os.path.isfile(input):
if input.endswith('.xlsx'):
import pandas as pd
df = pd.read_excel(input, dtype=str)
code_column = '종목코드'
if code_column in df:
codes = df[code_column]
else:
codes = df.iloc[0]
codes_len = len(codes)
elif input.endswith('.txt'):
with open(input) as f:
codes = [line.strip() for line in f]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input type.')
else:
fail_with_usage('Unrecognized input type.')
if output is None:
if codes_len > 1 or codes_from_input:
fail_with_usage('Output path is not specified.')
if format is None:
format = 'md'
else:
if format is None:
format = 'xlsx'
if format == 'xlsx':
if not output.endswith('.xlsx'):
output += '.xlsx'
elif format == 'md':
if not output.endswith('.md'):
output += '.md'
elif format == 'json':
if not output.endswith('.json'):
output += '.json'
import pandas as pd
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
context.EnsureConnected()
if not codes_from_input and codes_len == 1:
df = context.GetStockInfoAsDataFrame(codes)
if not output:
if format == 'md':
click.echo(df.iloc[0].to_markdown())
elif format == 'json':
click.echo(df.iloc[0].to_json())
else:
if format == 'xlsx':
df.to_excel(output, index=False)
elif format == 'json':
with open(output, 'w') as f:
click.echo(df.iloc[0].to_json(), file=f)
elif codes_len > 0:
df = context.GetStockInfoAsDataFrame(codes)
df.to_excel(output, index=False)
elif len(markets) > 0:
if 'all' in markets:
markets = market_codes
if format == 'xlsx':
with pd.ExcelWriter(output) as writer: # pylint: disable=abstract-class-instantiated
for market in markets:
codes = context.GetCodeListByMarketAsList(market)
df = context.GetStockInfoAsDataFrame(codes)
df.to_excel(writer, index=False, sheet_name=market)
elif format == 'json':
codes = set()
for market in markets:
codes = codes.union(set(context.GetCodeListByMarketAsList(market)))
codes = sorted(list(codes))
with open(output, 'w', encoding='utf-8') as f:
for code in codes:
df = context.GetStockInfoAsDataFrame(code)
click.echo(df.iloc[0].to_json(), file=f)
else:
fail_with_usage('Cannot specify codes.')
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get daily OHLCV of stocks.')
@click.option('-c', '--code', 'codes', metavar='CODE', multiple=True, help='Stock code to get. Can set multiple times.')
@click.option('-i', '--input', metavar='FILENAME', type=click.Path(), help='Text or excel file containing codes. Alternative to --codes option.')
@click.option('-o', '--output', metavar='FOLDER|FILENAME', type=click.Path(), help='Output foldername or filename for single code. Files inside the folder would be named as CODE.xlsx. Defaults to current directory.')
@click.option('-f', '--format', metavar='FORMAT', type=click.Choice(['xlsx', 'sqlite3'], case_sensitive=False), default='xlsx', help='Output format. (default: xlsx)')
@click.option('-x', '--clean', is_flag=True, help='Remove untracked files.')
@click.option('-s', '--start-date', metavar='YYYY-MM-DD', type=click.DateTime(formats=['%Y-%m-%d', '%Y%m%d']), help='Most recent date to get. Defaults to today or yesterday if market is open.')
@click.option('-e', '--end-date', metavar='YYYY-MM-DD', type=click.DateTime(formats=['%Y-%m-%d', '%Y%m%d']), help='Stops if reached, not included (optional).')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def daily(codes, input, output, format, clean, start_date, end_date, port, verbose): # pylint: disable=redefined-builtin
if (codes, input, output, start_date, end_date) == (tuple(), None, None, None, None):
fail_with_usage()
set_verbosity(verbose)
codes_len = len(codes)
extension = '.' + format
if codes_len == 0:
if input is None:
fail_with_usage('Either code or input should be given.')
if not os.path.exists(input):
fail_with_usage('Given input does not exist.')
if os.path.isfile(input):
if input.endswith('.xlsx'):
import pandas as pd
df = pd.read_excel(input, dtype=str)
code_column = '종목코드'
if code_column in df:
codes = df[code_column]
else:
codes = df.iloc[0]
codes_len = len(codes)
elif input.endswith('.txt'):
with open(input) as f:
codes = [line.strip() for line in f]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input file type.')
elif os.path.isdir(input):
import re
codes = [os.path.splitext(name)[0] for name in os.listdir(input) if name.endswith(extension)]
codes = [code for code in codes if re.match(r'[0-9A-Z]+', code)]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input type.')
if output is None:
output = '.'
if os.path.exists(output):
if os.path.isdir(output):
output_is_folder = True
else:
output_is_folder = False
else:
if output.endswith('/') or output.endswith(os.path.sep) or codes_len > 1:
output_is_folder = True
else:
output_is_folder = False
if not output_is_folder:
assert codes_len == 1
code = codes[0]
base_output = os.path.basename(output)
output = os.path.dirname(output)
if not base_output.endswith(extension):
base_output += extension
final_output = os.path.join(output, base_output)
def post_process(updater, codes, output, context): # pylint: disable=unused-argument
os.replace(updater.get_filepath_for_code(code), final_output)
else:
def post_process(updater, codes, output, context): # pylint: disable=unused-argument
pass
import contextlib
with contextlib.ExitStack() as stack:
context = None
if port is not None:
from koapy import KiwoomOpenApiContext
context = stack.enter_context(KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose))
context.EnsureConnected()
from koapy.data.HistoricalStockPriceDataUpdater import HistoricalStockPriceDataUpdater
updater = HistoricalStockPriceDataUpdater(codes, output, 'daily', 1, format, delete_remainings=clean, context=context)
updater.update()
post_process(updater, codes, output, context)
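# A hedged usage sketch (not part of the original module): the click commands
# defined above can be exercised programmatically, for example in tests, via
# click's CliRunner. The arguments below are illustrative only.
from click.testing import CliRunner

def _show_daily_help():
    runner = CliRunner()
    result = runner.invoke(cli, ['get', 'daily', '--help'])
    print(result.output)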
minute_intervals = [
'1',
'3',
'5',
'10',
'15',
'30',
'45',
'60',
]
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get minute OHLCV of stocks.')
@click.option('-c', '--code', 'codes', metavar='CODE', multiple=True, help='Stock code to get. Can set multiple times.')
@click.option('-t', '--interval', metavar='INTERVAL', type=click.Choice(minute_intervals, case_sensitive=False), help='Minute interval. Possible values are [%s]' % '|'.join(minute_intervals))
@click.option('-i', '--input', metavar='FILENAME', type=click.Path(), help='Text or excel file containing codes. Alternative to --codes option.')
@click.option('-o', '--output', metavar='FOLDER|FILENAME', type=click.Path(), help='Output foldername or filename for single code. Files inside the folder would be named as CODE.xlsx. Defaults to current directory.')
@click.option('-f', '--format', metavar='FORMAT', type=click.Choice(['xlsx', 'sqlite3'], case_sensitive=False), default='xlsx', help='Output format. (default: xlsx)')
@click.option('-x', '--clean', is_flag=True, help='Remove untracked files.')
@click.option('-s', '--start-date', metavar="YYYY-MM-DD['T'hh:mm:ss]", type=click.DateTime(formats=['%Y-%m-%d', '%Y%m%d', '%Y-%m-%dT%H:%M:%S', '%Y%m%d%H%M%S']), help='Most recent date to get. Defaults to today or yesterday if market is open.')
@click.option('-e', '--end-date', metavar="YYYY-MM-DD['T'hh:mm:ss]", type=click.DateTime(formats=['%Y-%m-%d', '%Y%m%d', '%Y-%m-%dT%H:%M:%S', '%Y%m%d%H%M%S']), help='Stops if reached, not included (optional).')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def minute(codes, interval, input, output, format, clean, start_date, end_date, port, verbose): # pylint: disable=redefined-builtin
if (codes, interval, input, output, start_date, end_date) == (tuple(), None, None, None, None, None):
fail_with_usage()
set_verbosity(verbose)
if interval is None:
fail_with_usage('Interval is not set.')
codes_len = len(codes)
extension = '.' + format
if codes_len == 0:
if input is None:
fail_with_usage('Either code or input should be given.')
if not os.path.exists(input):
fail_with_usage('Given input does not exist.')
if os.path.isfile(input):
if input.endswith('.xlsx'):
import pandas as pd
df = pd.read_excel(input, dtype=str)
code_column = '종목코드'
if code_column in df:
codes = df[code_column]
else:
codes = df.iloc[0]
codes_len = len(codes)
elif input.endswith('.txt'):
with open(input) as f:
codes = [line.strip() for line in f]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input file type.')
elif os.path.isdir(input):
import re
codes = [os.path.splitext(name)[0] for name in os.listdir(input) if name.endswith(extension)]
codes = [code for code in codes if re.match(r'[0-9A-Z]+', code)]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input type.')
if output is None:
output = '.'
if os.path.exists(output):
if os.path.isdir(output):
output_is_folder = True
else:
output_is_folder = False
else:
if output.endswith('/') or output.endswith(os.path.sep) or codes_len > 1:
output_is_folder = True
else:
output_is_folder = False
if not output_is_folder:
assert codes_len == 1
code = codes[0]
base_output = os.path.basename(output)
output = os.path.dirname(output)
extension = '.' + format
if not base_output.endswith(extension):
base_output += extension
final_output = os.path.join(output, base_output)
def post_process(updater, codes, output, context): # pylint: disable=unused-argument
os.replace(updater.get_filepath_for_code(code), final_output)
else:
def post_process(updater, codes, output, context): # pylint: disable=unused-argument
pass
import contextlib
with contextlib.ExitStack() as stack:
context = None
if port is not None:
from koapy import KiwoomOpenApiContext
context = stack.enter_context(KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose))
context.EnsureConnected()
from koapy.data.HistoricalStockPriceDataUpdater import HistoricalStockPriceDataUpdater
updater = HistoricalStockPriceDataUpdater(codes, output, 'minute', interval, format, delete_remainings=clean, context=context)
updater.update()
post_process(updater, codes, output, context)
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get TR info.')
@click.option('-t', '--trcode', 'trcodes', metavar='TRCODE', multiple=True, help='TR code to get (like opt10001).')
def trinfo(trcodes):
from koapy.openapi.TrInfo import TrInfo
def get_codes():
if trcodes:
if '-' in trcodes:
with click.open_file('-', 'r') as f:
for code in f:
yield code.strip()
else:
for code in trcodes:
yield code
else:
while True:
try:
code = click.prompt('trcode', prompt_suffix=' >>> ')
code = code.strip()
if code == 'exit':
break
if code:
yield code
except EOFError:
break
for trcode in get_codes():
trinfo = TrInfo.get_trinfo_by_code(trcode)
if trinfo is not None:
click.echo('[%s] : [%s]' % (trinfo.tr_code.upper(), trinfo.name))
click.echo(' [INPUT]')
for input in trinfo.inputs:
click.echo(' %s' % input.name)
if trinfo.single_outputs:
click.echo(' [OUTPUT] [SINGLE DATA] : [%s]' % trinfo.single_outputs_name)
for output in trinfo.single_outputs:
click.echo(' %s' % output.name)
if trinfo.multi_outputs:
click.echo(' [OUTPUT] [MULTI DATA] : [%s]' % trinfo.multi_outputs_name)
for output in trinfo.multi_outputs:
click.echo(' %s' % output.name)
else:
click.echo('Given trcode is invalid')
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get real type info.')
@click.option('-t', '--realtype', 'realtypes', metavar='REALTYPE', multiple=True, help='Real type name to get (like 주식시세).')
def realinfo(realtypes):
from koapy.openapi.RealType import RealType
def get_realtypes():
if realtypes:
if '-' in realtypes:
with click.open_file('-', 'r') as f:
for realtype in f:
yield realtype.strip()
else:
for realtype in realtypes:
yield realtype
else:
while True:
try:
realtype = click.prompt('realtype', prompt_suffix=' >>> ')
realtype = realtype.strip()
if realtype == 'exit':
break
if realtype:
yield realtype
except EOFError:
break
for realtype in get_realtypes():
fids = RealType.get_fids_by_realtype(realtype)
if fids:
names = [RealType.Fid.get_name_by_fid(fid, str(fid)) for fid in fids]
for fid, name in zip(fids, names):
click.echo(' [%s] = %s' % (fid, name))
else:
click.echo('Given realtype is invalid')
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get market holidays.')
@click.option('-o', '--output', metavar='FILENAME', type=click.Path(), help='Output filename. (optional)')
@click.option('-O', '--offline', is_flag=True, help='Do not use krx marketdata api. (default: false)')
@click.option('-u', '--update', is_flag=True, help='Update local cache, download from krx marketdata api.')
@click.option('-U', '--no-update', is_flag=True, help='Disable update.')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def holidays(output, offline, update, no_update, verbose):
set_verbosity(verbose)
if output is None:
import pandas as pd
if not offline:
from koapy.utils.krx.marketdata.holiday import download_holidays_as_dict
response = download_holidays_as_dict()
else:
def get_holidays():
import datetime
from koapy.utils.krx.calendar.KrxHolidayCalendar import KrxHolidayCalendar
today = datetime.datetime.today()
calendar = KrxHolidayCalendar()
start = datetime.datetime(today.year, 1, 1)
end = datetime.datetime(today.year, 12, 31)
holidays = calendar.holidays(start, end, return_name=True)
return holidays
def get_holidays_as_dict():
holidays = get_holidays()
response = {'block1': [{
'calnd_dd_dy': dt.strftime('%Y-%m-%d'),
'kr_dy_tp': dt.strftime('%a'),
'dy_tp_cd': dt.strftime('%a'),
'holdy_nm': name,
} for dt, name in holidays.items() if dt.weekday() < 5]}
return response
response = get_holidays_as_dict()
lang = locale.getdefaultlocale()[0]
if lang == 'ko_KR':
day_key = '<KEY>'
columns = ['일자 및 요일', '요일구분', '비고']
else:
day_key = '<KEY>'
columns = ['date', 'day of week', 'comment']
data = []
for holiday in response['block1']:
date = holiday['calnd_dd_dy']
day = holiday[day_key].strip()
name = holiday['holdy_nm']
data.append([date, day, name])
df = pd.DataFrame.from_records(data, columns=columns)
click.echo(df.to_markdown())
else:
if not offline:
from koapy.utils.krx.marketdata.holiday import download_holidays_as_excel
if output and not output.endswith('.xls'):
output += '.xls'
download_holidays_as_excel(output)
if update:
logging.warning('Cannot update on file output.')
else:
fail_with_usage('Saving to file should come with offline option disabled.')
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get user information.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def userinfo(port, verbose):
set_verbosity(verbose)
import pandas as pd
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
context.EnsureConnected()
result = {}
result['보유계좌수'] = context.GetLoginInfo('ACCOUNT_CNT')
account_numbers = context.GetLoginInfo('ACCLIST').rstrip(';').split(';')
for i, accno in enumerate(account_numbers):
result['계좌번호 (%d/%s)' % (i + 1, result['보유계좌수'])] = accno
result['사용자 ID'] = context.GetLoginInfo('USER_ID')
result['사용자명'] = context.GetLoginInfo('USER_NAME')
result['키보드보안 해지여부'] = {
'0': '정상',
'1': '해지',
}.get(context.GetLoginInfo('KEY_BSECGB'), '알수없음')
result['방화벽 설정 여부'] = {
'0': '미설정',
'1': '설정',
'2': '해지',
}.get(context.GetLoginInfo('FIREW_SECGB'), '알수없음')
result['접속서버 구분'] = {
'1': '모의투자',
}.get(context.GetServerGubun(), '실서버')
click.echo(pd.Series(result).to_markdown())
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get account deposit.')
@click.option('-a', '--account', metavar='ACCNO', help='Account number.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def deposit(account, port, verbose):
set_verbosity(verbose)
if account is None:
logging.info('Account not given. Using first account available.')
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
context.EnsureConnected()
if account is None:
account = context.GetAccountList()[0]
result = context.GetDepositInfo(account)
click.echo(result.to_markdown())
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get account evaluation.')
@click.option('-a', '--account', metavar='ACCNO', help='Account number.')
@click.option('-d', '--include-delisted', is_flag=True, help='Include delisted.', default=True)
@click.option('-D', '--exclude-delisted', is_flag=True, help='Exclude delisted.')
@click.option('-e', '--for-each', is_flag=True, help='Show individual evaluation.', default=True)
@click.option('-E', '--as-summary', is_flag=True, help='Show summarized evaluation.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def evaluation(account, include_delisted, exclude_delisted, for_each, as_summary, port, verbose):
set_verbosity(verbose)
if account is None:
logging.info('Account not given. Using first account available.')
if exclude_delisted:
include_delisted = False
if as_summary:
for_each = False
lookup_type = '1'
elif for_each:
lookup_type = '2'
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
context.EnsureConnected()
if account is None:
account = context.GetAccountList()[0]
single, multi = context.GetAccountEvaluationStatusAsSeriesAndDataFrame(account, include_delisted)
click.echo('[계좌평가현황요청] : [계좌평가현황]')
click.echo(single.to_markdown())
click.echo()
click.echo('[계좌평가현황요청] : [종목별계좌평가현황]')
click.echo(multi.to_markdown())
click.echo()
single, multi = context.GetAccountEvaluationBalanceAsSeriesAndDataFrame(account, lookup_type)
click.echo('[계좌평가잔고내역요청] : [계좌평가결과]')
click.echo(single.to_markdown())
click.echo()
click.echo('[계좌평가잔고내역요청] : [계좌평가잔고개별합산]')
click.echo(multi.to_markdown())
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get order history of a date.')
@click.option('-a', '--account', metavar='ACCNO', help='Account number.')
@click.option('-d', '--date', metavar='DATE', help='Date to get.')
@click.option('-r', '--reverse', is_flag=True)
@click.option('-e', '--executed-only', is_flag=True)
@click.option('-E', '--not-executed-only', is_flag=True)
@click.option('-S', '--stock-only', is_flag=True)
@click.option('-B', '--bond-only', is_flag=True)
@click.option('-s', '--sell-only', is_flag=True)
@click.option('-b', '--buy-only', is_flag=True)
@click.option('-c', '--code', metavar='CODE', help='Stock code to get.')
@click.option('-o', '--starting-order-no', metavar='ORDERNO', help='Starting order no.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def orders(account, date, reverse, executed_only, not_executed_only, stock_only, bond_only, sell_only, buy_only, code, starting_order_no, port, verbose):
set_verbosity(verbose)
if account is None:
logging.info('Account not given. Using first account available.')
from koapy import KiwoomOpenApiContext
sort_type = '1'
if reverse:
sort_type = '2'
if executed_only:
sort_type = '3'
if not_executed_only:
sort_type = '4'
asset_type = '0'
if stock_only:
asset_type = '1'
if bond_only:
asset_type = '2'
order_type = '0'
if sell_only:
order_type = '1'
if buy_only:
order_type = '2'
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
context.EnsureConnected()
if account is None:
account = context.GetAccountList()[0]
click.echo(context.GetOrderLogAsDataFrame3(account, date, sort_type, asset_type, order_type, code, starting_order_no).to_markdown())
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get OpenApi module installation path.')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def modulepath(port, verbose):
set_verbosity(verbose)
from koapy import KiwoomOpenApiContext
with KiwoomOpenApiContext(port=port, client_check_timeout=client_check_timeout, verbosity=verbose) as context:
click.echo(context.GetAPIModulePath())
@get.command(context_settings=CONTEXT_SETTINGS, short_help='Get error message for error code.')
@click.option('-e', '--err-code', metavar='ERR', type=int, help='Error code to check.')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def errmsg(err_code, verbose):
set_verbosity(verbose)
from koapy.openapi.KiwoomOpenApiError import KiwoomOpenApiError
err_msg = KiwoomOpenApiError.get_error_message_by_code(err_code)
click.echo('[%d] %s' % (err_code, err_msg))
@cli.command(context_settings=CONTEXT_SETTINGS, short_help='Watch realtime data.')
@click.option('-c', '--code', 'codes', metavar='CODE', multiple=True, help='Stock code to get. Can set multiple times.')
@click.option('-i', '--input', metavar='FILENAME', type=click.Path(), help='Text or excel file containing codes. Alternative to --codes option.')
@click.option('-f', '--fid', 'fids', metavar='FID', multiple=True, help='FID to get. Can set multiple times.')
@click.option('-t', '--realtype', metavar='REALTYPE', help='Real type name. Alternative to --fid.')
@click.option('-o', '--output', metavar='FILENAME', type=click.File('w', lazy=True), default='-', help='Output filename (optional).')
@click.option('--format', metavar='FORMAT', type=click.Choice(['md', 'json'], case_sensitive=False), default='md')
@click.option('-p', '--port', metavar='PORT', help='Port number of grpc server (optional).')
@click.option('-v', '--verbose', count=True, help='Verbosity.')
def watch(codes, input, fids, realtype, output, format, port, verbose):
if (codes, fids, realtype) == (tuple(), tuple(), None):
fail_with_usage()
set_verbosity(verbose)
codes_len = len(codes)
if codes_len == 0:
if input is None:
fail_with_usage('Either code or input should be given.')
if not os.path.exists(input):
fail_with_usage('Given input does not exist.')
if os.path.isfile(input):
if input.endswith('.xlsx'):
import pandas as pd
df = pd.read_excel(input, dtype=str)
code_column = '종목코드'
if code_column in df:
codes = df[code_column]
else:
codes = df.iloc[0]
codes_len = len(codes)
elif input.endswith('.txt'):
with open(input) as f:
codes = [line.strip() for line in f]
codes_len = len(codes)
else:
fail_with_usage('Unrecognized input type.')
else:
fail_with_usage('Unrecognized input type.')
if realtype is not None:
from koapy.openapi.RealType import RealType
fids_from_realtype = RealType.get_fids_by_realtype(realtype)
fids = list(set(fids).union(set(fids_from_realtype)))
if not codes:
fail_with_usage('No codes to watch. Set --code or --input.')
if not fids:
fail_with_usage('Cannot infer fids to watch. Set either --fid or --realtype.')
import datetime
import pandas as pd
from koapy import KiwoomOpenApiContext
from koapy.openapi.RealType import RealType
def parse_message(event):
fids = event.single_data.names
names = [RealType.Fid.get_name_by_fid(fid, str(fid)) for fid in fids]
values = event.single_data.values
dic = dict((name, value) for fid, name, value in zip(fids, names, values) if name != fid)
series = | pd.Series(dic) | pandas.Series |
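# A hedged sketch (not part of the original command) showing how each parsed
# real-time event could be rendered once the Series above has been built.
def print_series(series):
    # Timestamp each event and echo it in the same markdown style used elsewhere.
    click.echo('[%s]' % datetime.datetime.now().isoformat())
    click.echo(series.to_markdown())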
import pandas as pd
from sqlalchemy import create_engine
from dbnd import log_metric, log_dataframe
QUERY = ""
DB_CONNECTION = ""
def track_database():
engine = create_engine(DB_CONNECTION)
log_metric("query executed", QUERY)
with engine.connect() as connection:
result = connection.execute(QUERY).keys()
header = [row for row in result]
result = connection.execute(QUERY)
data = [row for row in result]
df = | pd.DataFrame(data, columns=header) | pandas.DataFrame |
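# The snippet stops right after building the DataFrame; a plausible (assumed)
# continuation logs the fetched frame and its size to DBND tracking using the
# helpers imported above.
log_dataframe("query_result", df)
log_metric("rows fetched", len(df))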
##
drive_path = 'c:/'
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
from scipy.stats import ks_2samp
from scipy.stats import anderson_ksamp
from scipy.stats import kruskal
from scipy.stats import variation
from scipy import signal as sps
import seaborn as sns
import glob
import re
##
#This piece spits out all the peaks in one dataframe
def getpeaks(date):
'''Spits out all the peaks from imaging session
session input as string
'''
# This piece spits out all the peaks from one session in one dataframe
peakdf = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
for filename in glob.glob('*dt.txt'):
f = pd.read_csv(filename, nrows=175)
df = f[[col for col in f.columns if 'G PMT' in col]]
peak = []
for col in df.columns:
a = df[col]
firsta = 1;
firstb = 24;
# Figures out if there is a min or max and sees if it passes threshold (3SD)
if np.absolute(min(a[26:80])) > np.absolute(max(a[26:80])) and np.absolute(min(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = min(a[26:80])
peak.append(b)
elif np.absolute(max(a[26:80])) > np.absolute(min(a[26:80])) and np.absolute(max(a[26:80])) >= 3 * np.std(
df[col][firsta:firstb]):
b = max(a[26:80])
peak.append(b)
else:
b = 0
peak.append(b)
peaks = pd.DataFrame(peak).T
peaks.columns = df.columns
peaks = pd.concat([pd.DataFrame({'Trial': [int(filename.split('dt')[0])]}), peaks], axis=1)
peakdf = peakdf.append(peaks, ignore_index=True)
peakdf.to_csv('%s_peaks.csv' % date, index=False)
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get average peak across all trials using peakdf dataframe
meandf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
mean = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
mean.append(peakdf.loc[peakdf['Trial'].isin(odor)][col].mean())
mean = pd.DataFrame(mean).T
mean.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
meandf = meandf.append(mean)
meandf = meandf.reset_index(drop=True)
meandf.columns = [str(col) + '_' + date for col in meandf.columns]
meandf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), meandf], axis=1)
meandf.to_csv('%s_mean.csv' % date, index=False)
# Get proportion of successful trials
successdf = pd.DataFrame([])
for key in odortrials:
odor = odortrials[key]
newdf = peakdf.loc[peakdf['Trial'].isin(odor)]
s = []
for col in peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]]:
s.append(np.divide((newdf.loc[:, col] != 0).sum(), float(len(newdf.loc[:, col]))))
s = pd.DataFrame(s).T
s.columns = peakdf.loc[peakdf['Trial'].isin(odor)][
[col for col in peakdf.loc[peakdf['Trial'].isin(odor)].columns if 'G PMT' in col]].columns
successdf = successdf.append(s)
successdf = successdf.reset_index(drop=True)
successdf.columns = [str(col) + '_' + date for col in successdf.columns]
successdf = pd.concat([pd.DataFrame({'Odor': odortrials.keys()}), successdf], axis=1)
successdf.to_csv('%s_success.csv' % date, index=False)
return 'Done'
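# Hypothetical usage helper (the session names passed to it would be folder
# names like the date strings used above; none are taken from the original
# study): run the peak extraction for several imaging sessions in one go.
def run_sessions(sessions):
    # Extract and save peaks for each imaging session in the list.
    for session in sessions:
        getpeaks(session)
# Example (illustrative only): run_sessions(['160330_1', '160401_2'])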
##
def getintegral(date):
'''Compute integrals and integral means
date: string, session
'''
temp = pd.DataFrame([])
os.chdir('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\HabituationFiles\\%s' % date)
# Pull the trials that correspond to specific date/odors
trials = pd.read_csv('C:\\Users\\Annie\\Documents\\Data\\Ca_Imaging\\Analysis\\Odor_Panel\\Odor_Trials.csv')
filerow = trials.loc[trials['File'] == date]
odortrials = {}
for t in filerow.Odor.unique():
y = {t: [int(x) for x in filerow.loc[filerow['Odor'] == t][['T1', 'T2', 'T3', 'T4']].values.tolist()[0]]}
odortrials.update(y)
# Get the frame rate for a specified date
num = trials.File.unique().tolist().index('%s' % date)
fr = trials.loc[trials['File'] == trials.File.unique().tolist()[num]]['FrameRate'].iloc[0]
# Get the integral
intdf = | pd.DataFrame([]) | pandas.DataFrame |
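# A hedged sketch of the integration step the snippet truncates: integrate one
# ROI trace over the same 26:80 response window used by the peak code, spacing
# samples by the frame rate computed above. The window choice is an assumption.
def integrate_trace(trace, frame_rate):
    window = trace[26:80]
    return np.trapz(window, dx=1.0 / frame_rate)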
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 22:51:03 2018
@author: <NAME>
"""
import os
import time
import pdb
import shutil
import sys
import argparse
import logging
import tempfile
import multiprocessing as mp
import platform
import pytest
import numpy as np
import pandas as pd
import sandy
from sandy.settings import SandyError
from sandy.formats import read_formatted_file, get_file_format
from sandy.formats.endf6 import Endf6
from sandy.formats.utils import FySamples, XsCov
from sandy import pfns
from sandy.tools import is_valid_dir, is_valid_file
from sandy import njoy
__author__ = "<NAME>"
__all__ = [
"SamplingManager",
"sampling",
]
def get_parser():
description = """Produce perturbed files containing sampled parameters
that represent the information\nstored in the evaluated nuclear data
covariances"""
parser = argparse.ArgumentParser(
prog="sandy",
description=description,
formatter_class=argparse.RawTextHelpFormatter,
)
SamplingManager.add_file_argument(parser)
SamplingManager.add_covfile_argument(parser)
SamplingManager.add_mat_argument(parser)
SamplingManager.add_mf_argument(parser)
SamplingManager.add_mt_argument(parser)
SamplingManager.add_processes_argument(parser)
SamplingManager.add_samples_argument(parser)
SamplingManager.add_version_argument(parser)
return parser
class SamplingManager():
"""
Attributes
----------
file : `str`
ENDF-6 or PENDF format file
covfile : `str`
ENDF-6 file containing covariances
mat : `list` of `int`
draw samples only from the selected MAT sections
mf : `list` of `int`
draw samples only from the selected MF sections
mt : `list` of `int`
draw samples only from the selected MT sections
processes : `int`
number of worker processes (default is 1)
samples : `int`
number of samples (default is 100)
"""
def __repr__(self):
return self.__dict__.__repr__()
def __init__(self, file):
self.file = file
@property
def file(self):
"""
Examples
--------
>>> with pytest.raises(Exception): sandy.SamplingManager("random_file")
"""
return self._file
@file.setter
def file(self, file):
if not os.path.isfile(file):
raise ValueError(f"File '{file}' does not exist")
self._file = file
@staticmethod
def add_file_argument(parser):
parser.add_argument(
'file',
help="ENDF-6 or PENDF format file",
)
@property
def covfile(self):
"""
"""
if hasattr(self, "_covfile"):
return self._covfile
else:
return None
@covfile.setter
def covfile(self, covfile):
if not covfile:
self._covfile = None
else:
if not os.path.isfile(covfile):
raise ValueError(f"File '{covfile}' does not exist")
self._covfile = covfile
@staticmethod
def add_covfile_argument(parser):
parser.add_argument(
'--covfile', '-C',
help="ENDF-6 file containing covariances",
)
@property
def mat(self):
if hasattr(self, "_mat"):
return self._mat
else:
return list(range(1, 10000))
@mat.setter
def mat(self, mat):
self._mat = np.array(mat).astype(int).tolist()
@staticmethod
def add_mat_argument(parser):
parser.add_argument(
'--mat',
type=int,
default=list(range(1, 10000)),
action='store',
nargs="+",
metavar="{1,..,9999}",
help="draw samples only from the selected MAT sections "
"(default is keep all)",
)
@property
def mf(self):
if hasattr(self, "_mf"):
return self._mf
else:
return [31, 33, 34, 35]
@mf.setter
def mf(self, mf):
self._mf = np.array(mf).astype(int).tolist()
@staticmethod
def add_mf_argument(parser):
parser.add_argument(
'--mf',
type=int,
default=[31, 33, 34, 35],
action='store',
nargs="+",
metavar="{31,33,34,35}",
help="draw samples only from the selected MF sections "
"(default is keep all)",
)
@property
def mt(self):
if hasattr(self, "_mt"):
return self._mt
else:
return list(range(1, 1000))
@mt.setter
def mt(self, mt):
self._mt = np.array(mt).astype(int).tolist()
@staticmethod
def add_mt_argument(parser):
parser.add_argument(
'--mt',
type=int,
default=list(range(1, 1000)),
action='store',
nargs="+",
metavar="{1,..,999}",
help="draw samples only from the selected MT sections "
"(default = keep all)",
)
@property
def processes(self):
if hasattr(self, "_processes"):
return self._processes
else:
return 1
@processes.setter
def processes(self, processes):
if platform.system() == "Windows":
self._processes = 1
logging.info("Running on Windows does not allow parallel "
"processing")
else:
self._processes = int(processes)
@staticmethod
def add_processes_argument(parser):
parser.add_argument(
'--processes', '-N',
type=int,
default=1,
help="number of worker processes (default is 1)",
)
@property
def samples(self):
if hasattr(self, "_samples"):
return self._samples
else:
return 100
@samples.setter
def samples(self, samples):
self._samples = int(samples)
@staticmethod
def add_samples_argument(parser):
parser.add_argument(
'--samples', '-S',
type=int,
default=100,
help="number of samples (default is 100)",
)
@staticmethod
def add_version_argument(parser):
parser.add_argument(
"--version", "-v",
action='version',
version=f'%(prog)s {sandy.__version__}',
help="code version",
)
@classmethod
def from_cli(cls, iargs=None):
"""
Parse command line arguments for sampling option.
Parameters
----------
iargs : `list` of `str`, optional, default is `None`
list of strings to parse.
The default is taken from `sys.argv`.
Returns
-------
`sandy.SamplingManager`
object to draw samples from endf6 file
Examples
--------
>>> file = os.path.join(sandy.data.__path__[0], "h1.endf")
>>> sm = sandy.SamplingManager.from_cli([file])
"""
arguments, skip = get_parser().parse_known_args(args=iargs)
sm = cls(arguments.file)
for k, v in arguments._get_kwargs():
sm.__setattr__(k, v)
return sm
@classmethod
def from_cli2(cls, iargs=None):
"""
Parse command line arguments for sampling option.
Parameters
----------
iargs : `list` of `str`, optional, default is `None`
list of strings to parse.
The default is taken from `sys.argv`.
Returns
-------
`argparse.Namespace`
namespace object containing processed given arguments and/or
default options.
"""
description = """Produce perturbed files containing sampled parameters
that represent the information\nstored in the evaluated nuclear data
covariances"""
parser = argparse.ArgumentParser(
prog="sandy",
description=description,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('--acer',
default=False,
action="store_true",
help="for each perturbed file, produce ACE files\n"
"(argument file must be in ENDF-6 format, not PENDF)\n(argument temperature is required)\n(default = False)")
parser.add_argument('--cov33csv',
type=lambda x: is_valid_file(parser, x),
help="file containing xs/nubar covariances in csv "
"format")
parser.add_argument('--debug',
default=False,
action="store_true",
help="turn on debug mode")
parser.add_argument('--eig',
type=int,
default=10,
metavar="N",
help="print the first N eigenvalues of the evaluated covariance matrices\n(default = do not print)")
parser.add_argument('--energy-sequence', '-E',
type=int,
metavar="EL",
default=49,
help=argparse.SUPPRESS)
parser.add_argument('--errorr',
default=False,
action="store_true",
help="run NJOY module ERRORR to produce covariance "
"matrix for xs data (default = False)")
parser.add_argument('--fission-yields', '-F',
default=False,
action="store_true",
help="input <file> contains fission yields")
parser.add_argument('--max-polynomial', '-P',
type=int,
help="Maximum order of Legendre polynomial coefficients considered for sampling (default = all)")
parser.add_argument('--njoy',
type=lambda x: is_valid_file(parser, x),
default=None,
help="NJOY executable "
"(default search PATH, and env variable NJOY)")
parser.add_argument('--outdir', '-D',
metavar="DIR",
default=os.getcwd(),
type=lambda x: is_valid_dir(parser, x, mkdir=True),
help="target directory where outputs are stored\n(default = current working directory)\nif it does not exist it will be created")
parser.add_argument('--outname', '-O',
type=str,
help="basename for the output files "
"(default is the the basename of <file>.)")
parser.add_argument('--seed31',
type=int,
default=None,
metavar="S31",
help="seed for random sampling of MF31 covariance "
"matrix (default = random)")
parser.add_argument('--seed33',
type=int,
default=None,
metavar="S33",
help="seed for random sampling of MF33 covariance "
"matrix (default = random)")
parser.add_argument('--seed34',
type=int,
default=None,
metavar="S34",
help="seed for random sampling of MF34 covariance "
"matrix (default = random)")
parser.add_argument('--seed35',
type=int,
default=None,
metavar="S35",
help="seed for random sampling of MF35 covariance "
"matrix (default = random)")
parser.add_argument('--temperatures', '-T',
default=[],
type=float,
action='store',
nargs="+",
metavar="T",
help="for each perturbed file, produce ACE files at "
"given temperatures")
init = parser.parse_known_args(args=iargs)[0]
if init.acer and not init.temperatures:
parser.error("--acer requires --temperatures")
if init.acer and sandy.formats.get_file_format(init.file) != "endf6":
parser.error("--acer requires file in 'endf6' format")
return init
@property
def tape(self):
if not hasattr(self, "_tape"):
self._tape = sandy.Endf6.from_file(self.file)
return self._tape
@tape.setter
def tape(self, tape):
self._tape = tape
@property
def covtape(self):
if not self.covfile or self.covfile == self.file:
self._covtape = self.tape
if not hasattr(self, "_covtape"):
self._covtape = sandy.Endf6.from_file(self.covfile)
return self._covtape
@covtape.setter
def covtape(self, covtape):
self._covtape = covtape
def get_xs_samples(self):
"""
Draw samples using all covariance sections in the given tape.
"""
mf = 33
pertxs = None
if mf in self.mf and mf in self.covtape.mf:
covtape = self.covtape.filter_by(
listmat=self.mat,
listmf=[33],
listmt=self.mt,
)
xscov = sandy.XsCov.from_endf6(covtape)
if not xscov.empty:
pertxs = xscov.get_samples(self.samples)#, eig=init.eig, seed=init.seed33)
return pertxs
def _process_into_ace(ismp):
global init
outname = init.outname if init.outname else os.path.basename(init.file)
smpfile = os.path.join(init.outdir, f'{outname}-{ismp}')
print(ismp)
kwargs = dict(
purr=False,
wdir=init.outdir,
keep_pendf=False,
pendftape=smpfile,
tag=f"_{ismp}",
temperatures=init.temperatures,
err=0.005,
addpath="",
)
fmt = sandy.formats.get_file_format(smpfile)
if fmt == "pendf":
kwargs["pendftape"] = smpfile
inp = init.file
elif fmt == "endf6":
inp = smpfile
input, inputs, outputs = njoy.process(inp, **kwargs)
def _sampling_mp(ismp, skip_title=False, skip_fend=False):
global init, pnu, pxs, plpc, pchi, pfy, tape
t0 = time.time()
mat = tape.mat[0]
newtape = Endf6(tape.copy())
extra_points = np.logspace(-5, 7, init.energy_sequence)
if not pxs.empty:
xs = newtape.get_xs()
if not xs.empty:
xspert = xs.perturb(pxs[ismp])
newtape = newtape.update_xs(xspert)
if not pnu.empty:
nubar = newtape.get_nubar()
if not nubar.empty:
nubarpert = nubar.perturb(pnu[ismp])
newtape = newtape.update_nubar(nubarpert)
if not pchi.empty:
# use new format tape for energy distribution
endfnew = sandy.Endf6._from_old_format(newtape)
edistr = sandy.Edistr.from_endf6(endfnew).add_points(extra_points)
if not edistr.empty:
edistrpert = edistr.perturb(pchi[ismp])
newtape = newtape.update_edistr(edistrpert)
if not plpc.empty:
lpc = newtape.get_lpc().add_points(extra_points)
if not lpc.empty:
lpcpert = lpc.perturb(plpc[ismp])
newtape = newtape.update_lpc(lpcpert)
if not pfy.empty:
fy = newtape.get_fy()
if not fy.empty:
fypert = fy.perturb(pfy[ismp])
newtape = newtape.update_fy(fypert)
print("Created sample {} for MAT {} in {:.2f} sec".format(ismp, mat, time.time()-t0,))
descr = ["perturbed file No.{} created by SANDY".format(ismp)]
return newtape.delete_cov().update_info(descr=descr).write_string(skip_title=skip_title, skip_fend=skip_fend)
# def _sampling_fy_mp(ismp, skip_title=False, skip_fend=False):
# global tape, PertFY, init
# t0 = time.time()
# mat = tape.mat[0]
# newtape = Endf6(tape.copy())
# fy = newtape.get_fy()
# fynew = fy.perturb(PertFy[ismp])
# newtape = newtape.update_fy(fynew)
# print("Created sample {} for MAT {} in {:.2f} sec".format(ismp, mat, time.time()-t0,))
# descr = ["perturbed file No.{} created by SANDY".format(ismp)]
# return newtape.delete_cov().update_info(descr=descr).write_string(skip_title=skip_title, skip_fend=skip_fend)
def parse(iargs=None):
"""Parse command line arguments for sampling option.
Parameters
----------
iargs : `list` of `str`
list of strings to parse. The default is taken from `sys.argv`.
Returns
-------
`argparse.Namespace`
namespace object containing processed given arguments and/or default
options.
"""
description = "Produce perturbed files containing sampled parameters that "
"represent the information\nstored in the evaluated nuclear "
"data covariances"
parser = argparse.ArgumentParser(
prog="sandy",
description=description,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('file',
type=lambda x: is_valid_file(parser, x),
help="ENDF-6 or PENDF format file")
parser.add_argument('--acer',
default=False,
action="store_true",
help="for each perturbed file, produce ACE files\n"
"(argument file must be in ENDF-6 format, not PENDF)\n(argument temperature is required)\n(default = False)")
parser.add_argument('--cov', '-C',
type=lambda x: is_valid_file(parser, x),
help="file containing covariances")
parser.add_argument('--cov33csv',
type=lambda x: is_valid_file(parser, x),
help="file containing xs/nubar covariances in csv "
"format")
parser.add_argument('--debug',
default=False,
action="store_true",
help="turn on debug mode")
parser.add_argument('--eig',
type=int,
default=10,
metavar="N",
help="print the first N eigenvalues of the evaluated covariance matrices\n(default = do not print)")
parser.add_argument('--energy-sequence', '-E',
type=int,
metavar="EL",
default=49,
help=argparse.SUPPRESS)
parser.add_argument('--errorr',
default=False,
action="store_true",
help="run NJOY module ERRORR to produce covariance "
"matrix for xs data (default = False)")
parser.add_argument('--fission-yields', '-F',
default=False,
action="store_true",
help="input <file> contains fission yields")
parser.add_argument('--mat',
type=int,
default=list(range(1, 10000)),
action='store',
nargs="+",
metavar="{1,..,9999}",
help="draw samples only from the selected MAT "
"sections (default = keep all)")
parser.add_argument('--max-polynomial', '-P',
type=int,
help="Maximum order of Legendre polynomial coefficients considered for sampling (default = all)")
parser.add_argument('--mf',
type=int,
default=[31, 33, 34, 35],
action='store',
nargs="+",
metavar="{31,33,34,35}",
help="draw samples only from the selected MF sections "
"(default = keep all)")
parser.add_argument('--mt',
type=int,
default=list(range(1, 1000)),
action='store',
nargs="+",
metavar="{1,..,999}",
help="draw samples only from the selected MT sections "
"(default = keep all)")
parser.add_argument('--njoy',
type=lambda x: is_valid_file(parser, x),
default=None,
help="NJOY executable "
"(default search PATH, and env variable NJOY)")
parser.add_argument('--outdir', '-D',
metavar="DIR",
default=os.getcwd(),
type=lambda x: is_valid_dir(parser, x, mkdir=True),
help="target directory where outputs are stored\n(default = current working directory)\nif it does not exist it will be created")
parser.add_argument('--outname', '-O',
type=str,
help="basename for the output files "
"(default is the the basename of <file>.)")
parser.add_argument('--processes', '-N',
type=int,
default=1,
help="number of worker processes (default = 1)")
parser.add_argument('--samples', '-S',
type=int,
default=200,
help="number of samples (default = 200)")
parser.add_argument('--seed31',
type=int,
default=None,
metavar="S31",
help="seed for random sampling of MF31 covariance "
"matrix (default = random)")
parser.add_argument('--seed33',
type=int,
default=None,
metavar="S33",
help="seed for random sampling of MF33 covariance "
"matrix (default = random)")
parser.add_argument('--seed34',
type=int,
default=None,
metavar="S34",
help="seed for random sampling of MF34 covariance "
"matrix (default = random)")
parser.add_argument('--seed35',
type=int,
default=None,
metavar="S35",
help="seed for random sampling of MF35 covariance "
"matrix (default = random)")
parser.add_argument('--temperatures', '-T',
default=[],
type=float,
action='store',
nargs="+",
metavar="T",
help="for each perturbed file, produce ACE files at "
"given temperatures")
parser.add_argument("--version", "-v",
action='version',
version='%(prog)s {}'.format(sandy.__version__),
help="SANDY's version.")
init = parser.parse_known_args(args=iargs)[0]
if init.acer and not init.temperatures:
parser.error("--acer requires --temperatures")
if init.acer and sandy.formats.get_file_format(init.file) != "endf6":
parser.error("--acer requires file in 'endf6' format")
return init
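# Illustrative note added for clarity (not part of the original module): with the options
# defined above, a typical command line such as
#   sandy input.endf --samples 10 --processes 4 --acer --temperatures 300
# is parsed into a Namespace (e.g. init.samples == 10); --acer is only accepted together
# with --temperatures and an ENDF-6 input file, as enforced right before returning.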
def extract_samples(ftape, covtape):
"""
Draw samples using all covariance sections in the given tape.
"""
global init
# EXTRACT FY PERTURBATIONS FROM COV FILE
PertFy = pd.DataFrame()
if 8 in covtape.mf and 454 in ftape.mt:
fy = ftape.get_fy(listmat=init.mat, listmt=init.mt)
if not fy.empty:
index = fy.index.to_frame(index=False)
dfperts = []
for mat,dfmat in index.groupby("MAT"):
for mt,dfmt in dfmat.groupby("MT"):
for e,dfe in dfmt.groupby("E"):
fycov = fy.get_cov(mat, mt, e)
pert = fycov.get_samples(init.samples, eig=0)
dfperts.append(pert)
PertFy = FySamples( | pd.concat(dfperts) | pandas.concat |
import argparse
import datetime
import logging
import os
import pickle
from random import Random
import numpy as np
import pandas as pd
from dltranz.data_preprocessing.util import pd_hist
logger = logging.getLogger(__name__)
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=os.path.abspath)
parser.add_argument('--trx_files', nargs='+')
parser.add_argument('--target_files', nargs='*', default=[])
parser.add_argument('--print_dataset_info', action='store_true', default=True)
parser.add_argument('--col_client_id', type=str)
parser.add_argument('--cols_event_time', nargs='+')
parser.add_argument('--cols_category', nargs='*', default=[])
parser.add_argument('--cols_log_norm', nargs='*', default=[])
parser.add_argument('--col_target', required=False, type=str)
parser.add_argument('--test_size', type=float, default=0.1)
parser.add_argument('--salt', type=int, default=42)
parser.add_argument('--output_train_path', type=os.path.abspath)
parser.add_argument('--output_test_path', type=os.path.abspath)
parser.add_argument('--output_test_ids_path', type=os.path.abspath)
parser.add_argument('--log_file', type=os.path.abspath)
args = parser.parse_args(args)
logger.info('Parsed args:\n' + '\n'.join([f' {k:15}: {v}' for k, v in vars(args).items()]))
return args
def load_source_data(data_path, trx_files):
data = []
for file in trx_files:
file_path = os.path.join(data_path, file)
df = pd.read_csv(file_path)
data.append(df)
logger.info(f'Loaded {len(df)} rows from "{file_path}"')
data = pd.concat(data, axis=0)
logger.info(f'Loaded {len(data)} rows in total')
return data
def encode_col(col):
col = col.astype(str)
return col.map({k: i + 1 for i, k in enumerate(col.value_counts().index)})
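def _encode_col_example():
    """Illustrative only (added for clarity, not part of the original script): encode_col
    assigns 1-based integer codes ordered by value frequency."""
    demo = pd.Series(['a', 'b', 'a'])
    return encode_col(demo).tolist()  # [1, 2, 1] -- 'a' is most frequent so it maps to 1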
def trx_to_features(df_data, print_dataset_info,
col_client_id, cols_event_time, cols_category, cols_log_norm):
def copy_time(rec):
rec['event_time'] = rec['feature_arrays']['event_time']
del rec['feature_arrays']['event_time']
return rec
def _td_default(df, cols_event_time):
df_event_time = df[cols_event_time].drop_duplicates()
df_event_time = df_event_time.sort_values(cols_event_time)
df_event_time['event_time'] = np.arange(len(df_event_time))
df = | pd.merge(df, df_event_time, on=cols_event_time) | pandas.merge |
import pandas as pd
import numpy as np
import pickle
import json
def save_arguments(path="", args=None):
    if args is not None:
        print(vars(args))
        file = open("{}/arguments.json".format(path), "w", encoding="utf8")
        json.dump(vars(args), file, indent=4, sort_keys=True)
        file.close()
def load_arguments(path=""):
file = open("{}arguments.json".format(path), "rb")
return json.load(file)
def save_results(env_name, seeds, rewards, update_steps, path=""):
if type(env_name) == list:
env_name = " ".join(env_name)
rewards = np.array([np.array(r) for r in rewards]).T
seeds = ["Seed: {}".format(seed) for seed in seeds]
data = | pd.DataFrame(rewards, columns=seeds) | pandas.DataFrame |
import numpy as np
import pandas as pd
import xarray as xr
import copy
import warnings
try:
from plotly import graph_objs as go
plotly_installed = True
except:
plotly_installed = False
# warnings.warn("PLOTLY not installed so interactive plots are not available. This may result in unexpected funtionality")
global_3d_mapper = np.repeat(0, 256 * 4).reshape(256, -1)
global_3d_mapper[ord('T'), :] = np.array([0, 0, 0, 1])
global_3d_mapper[ord('C'), :] = np.array([0, 1, 0, 0])
global_3d_mapper[ord('A'), :] = np.array([1, 0, 0, 0])
global_3d_mapper[ord('G'), :] = np.array([0, 0, 1, 0])
def compare_sequence_matrices(seq_arr1, seq_arr2, flip=False, treat_as_match=[], ignore_characters=[], return_num_bases=False):
"""
This will "align" seq_arr1 to seq_arr2. It will calculate which positions in each sequence defined by seq_arr1 matches each position in each sequence defined by seq_arr2
seq_arr1 = NxP matrix where N = # of sequences represented in seq_arr1 and P represents each base pair position/the length of the string
seq_arr2 = MxP matrix where M = # of sequences represented in seq_arr1 and P represents each base pair position/the length of the string
This operation will return a NxPxM boolean matrix where each position represents whether the base pair in sequence N and the base pair in sequence M represented at position P match
In other words, if bool_arr = compare_sequence_matrices(A, B) then the total hamming distance between the second and third sequence in matrices A and B respective can be found as
>>> bool_arr.sum(axis=1)[1][2]
Args:
        seq_arr1 (np.array): NxP matrix of sequences represented as array of numbers
        seq_arr2 (np.array): MxP matrix of sequences represented as array of numbers
        flip (bool): If False then "true" means that letters are equal at the specified position, If True then return positions that are NOT equal to one another
        treat_as_match (list of chars): Treat any positions that have any of these letters in either matrices as True
        ignore_characters (list of chars): Ignore positions that have letters in either matrices at specified positions
.. warning:: datatype
When ignore character is defined, the array is passed back as a np.float dtype because it must accomodate np.nan
return_num_bases (False): If true then it will return a second parameter that defines the number of non nan values between alignments
Returns: NxPxM array of boolean values
"""
assert seq_arr1.shape[1] == seq_arr2.shape[1], 'Matrices do not match!'
    # view as np.uint8 because it ends up being faster
seq_arr1 = seq_arr1.view(np.uint8)
seq_arr2 = seq_arr2.view(np.uint8)
# this will return true of pos X in seqA and seqB are equal
diff_arr = (seq_arr1[..., np.newaxis].view(np.uint8) == seq_arr2.T[np.newaxis, ...])
# print(diff_arr.shape)
if treat_as_match:
# treat any of these letters at any positions as true regardles of whether they match in respective pairwise sequences
if not isinstance(treat_as_match, list):
treat_as_match = [treat_as_match]
treat_as_match = [ord(let) for let in treat_as_match]
# now we have to ignore characters that are equal to specific values
# return True for any positions that is equal to "treat_as_true"
ignore_pos = ((seq_arr1 == treat_as_match[0])[..., np.newaxis]) | ((seq_arr2 == treat_as_match[0])[..., np.newaxis].T)
for chr_p in treat_as_match[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis].T)
# now adjust boolean results to ignore any positions == treat_as_true
diff_arr = (diff_arr | ignore_pos) # if flip is False else (diffs | ignore_pos)
if flip is False:
diff_arr = diff_arr # (~(~diffarr))
else:
diff_arr = ~diff_arr # (~diffarr)
# print(diff_arr.shape)
if ignore_characters:
# do not treat these characters as true OR false
if not isinstance(ignore_characters, list):
ignore_characters = [ignore_characters]
ignore_characters = [ord(let) for let in ignore_characters]
# now we have to ignore characters that are equal to specific values
ignore_pos = (seq_arr1 == ignore_characters[0])[..., np.newaxis] | ((seq_arr2 == ignore_characters[0])[..., np.newaxis].T)
for chr_p in ignore_characters[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis]).T
diff_arr = diff_arr.astype(np.float)
diff_arr[ignore_pos] = np.nan
diff_arr = diff_arr
if return_num_bases:
num_bases = np.apply_along_axis(
arr=diff_arr,
axis=1,
func1d=lambda x: len(x[~np.isnan(x)])
)
return diff_arr, num_bases
else:
return diff_arr
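def _compare_sequence_matrices_example():
    """Illustrative only (added for clarity, not part of the original module): pairwise
    hamming distances between two small sequence matrices, as described in the docstring."""
    seqs1 = np.array([list("ACGT"), list("AGGT")], dtype="S1")
    seqs2 = np.array([list("ACGT")], dtype="S1")
    mismatches = compare_sequence_matrices(seqs1, seqs2, flip=True)
    # mismatches.sum(axis=1)[n][m] is the hamming distance between seqs1[n] and seqs2[m]
    return mismatches.sum(axis=1)  # array([[0], [1]])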
def numpy_value_counts_bin_count(arr, weights=None):
"""
Use the 'bin count' function in numpy to calculate the unique values in every column of a dataframe
clocked at about 3-4x faster than pandas_value_counts (df.apply(pd.value_counts))
Args:
arr (dataframe, or np array): Should represent rows as sequences and columns as positions. All values should be int
        weights (np array): Should be a list of weights to place on each row of arr (one weight per sequence)
"""
if not isinstance(arr, np.ndarray):
raise Exception('The provided parameter for arr is not a dataframe or numpy array')
if len(arr.shape) == 1:
# its a ONE D array, lets make it two D
arr = arr.reshape(-1, 1)
arr = arr.view(np.uint8)
    # returns an array of length equal to the max value in array + 1. each element represents number of times an integer appeared in array.
bins = [
np.bincount(arr[:, x], weights=weights)
for x in range(arr.shape[1])
]
indices = [np.nonzero(x)[0] for x in bins] # only look at non zero bins
series = [ | pd.Series(y[x], index=x) | pandas.Series |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
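# Worked example (annotation added for clarity, not part of the original test file):
# zip_frames([f_abs, f_sqrt], axis=1) on frames with columns ['A', 'B', 'C'] returns a frame
# whose columns interleave the inputs per source column -- abs(A), sqrt(A), abs(B), sqrt(B), ...
# -- matching the MultiIndex [(col, 'absolute'), (col, 'sqrt')] built in test_agg_transform.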
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([self.frame.mean(axis=axis),
self.frame.max(axis=axis),
self.frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# utils.py
"""
Utils
-----
Utility functions for the whole project.
"""
import collections
from copy import deepcopy
import logging.config
import os
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from soam.constants import DS_COL
logger = logging.getLogger(__name__)
def range_datetime(
datetime_start,
datetime_end,
hourly_offset: bool = False,
timeskip=None,
as_datetime: bool = False,
):
# TODO: review datetime_start, datetime_end, are datetimes?
# TODO: timeskip is Tick?
"""
Build datetime generator over successive time steps.
Parameters
----------
datetime_start: datetime
Start datetime.
datetime_end: datetime
End datetime.
hourly_offset: boolean
        Whether to offset hourly. False by default.
timeskip: Tick
An instance of fast-forwarding a substantial amount of time.
as_datetime: boolean
Whether the object type should be datetime. False by default.
"""
if timeskip is None:
timeskip = offsets.Day(1) if not hourly_offset else offsets.Hour(1)
if not isinstance(datetime_start, pd.Timestamp):
datetime_start = | pd.Timestamp(datetime_start) | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""
Xarray Stacked Images Writer.
Creates 3D datasets and allows setting spatial and temporal subsets (images and
time series).
"""
#TODO. File locking as option for multiple processes?
# todo: Add Point data results manager (for ismn based results)
import xarray as xr
import numpy as np
import pandas as pd
from datetime import datetime
import warnings
import matplotlib.pyplot as plt
import os
from io_utils.utils import safe_arange
from pygeogrids.grids import BasicGrid, CellGrid, lonlat2cell
import copy
from io_utils.write.utils import minmax
def to_reg_cell_grid(grid, cellsize=5.):
"""
Create RegularCellGrid from BasicGrid or CellGrid
Parameters
----------
grid : CellGrid or BasicGrid
Input grid to convert
cellsize : float, optional (default: 5.)
Cell size of the CellGrid to create.
Returns
-------
grid : RegularCellGrid
A regularly gridded CellGrid
"""
if isinstance(grid, RegularCellGrid) and (grid.cellsize == cellsize):
return grid
return RegularCellGrid(grid.arrlon, grid.arrlat, cellsize, gpis=grid.gpis,
subset=grid.subset, shape=grid.shape)
class Point(object):
""" Helper class to combine lon and lat in one Point """
def __init__(self, lon, lat):
if lat > 90. or lat <-90:
raise IOError('{} is out of valid bounds (+-90) for Latitude'.format(lat))
if lon > 180. or lon <-180:
raise IOError('{} is out of valid bounds (+-180) for Longitude'.format(lon))
self.__lon, self.__lat = lon, lat
self.__loc = (lon, lat)
def __str__(self):
return 'Lon: {}, Lat: {}'.format(self.lon, self.lat)
@property
def lon(self):
return self.__lon
@property
def lat(self):
return self.__lat
@property
def loc(self):
return (self.lon, self.lat)
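# Illustrative note added for clarity (not part of the original module): Point validates its
# inputs on construction, e.g. Point(10., 95.) raises IOError because 95 is outside the
# valid latitude range (+-90), while Point(10., 45.).loc returns the tuple (10., 45.).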
class RegularCellGrid(CellGrid):
# Special for of a Cell Grid that has equal spacing between grid points
def __init__(self, lon, lat, cellsize=5., gpis=None, geodatum='WGS84',
subset=None, setup_kdTree=False, **kwargs):
self.cellsize = cellsize
cells = lonlat2cell(lon, lat, cellsize=cellsize)
super(RegularCellGrid, self).__init__(lon, lat, cells, gpis, geodatum,
subset=subset, setup_kdTree=setup_kdTree,
**kwargs)
self.dx, self.dy = self._grid_space()
def _grid_space(self):
# find the resolution of the grid and check if it is regular along x and y
lons, lats = self.get_grid_points()[1], self.get_grid_points()[2]
diff_x = np.around(np.diff(sorted(np.unique(lons))), 10)
diff_y = np.around(np.diff(sorted(np.unique(lats))), 10)
dx = np.max(diff_x)
assert np.min(diff_x) == dx
dy = np.max(diff_y)
assert np.min(diff_y) == dy
assert np.all(diff_x == dx)
assert np.all(diff_y == dy)
return dx, dy
class RegularArea(object):
""" Helper class to combine lons and lats that span an Area """
def __init__(self, llc, urc, grid):
"""
Create an regularly gridded 2d Area.
Parameters
----------
llc : Point
Lower left corner point of the Area
urc : Point
Upper right corner point of the Area
grid : BasicGrid or CellGrid
An independent grid that the area is a subset of.
"""
self.grid = to_reg_cell_grid(grid)
self.llc = llc
self.urc = urc
self.subset = self._subset_from_corners()
def _subset(self, llc, urc):
ind = np.where((self.grid.activearrlon >= llc.lon) &
(self.grid.activearrlon <= urc.lon) &
(self.grid.activearrlat >= llc.lat) &
(self.grid.activearrlat <= urc.lat))
gpis = self.grid.activegpis[ind]
lons = self.grid.activearrlon[ind]
lats = self.grid.activearrlat[ind]
return gpis, lons, lats
def _subset_from_corners(self):
self._assert_corners()
gpis, lons, lats = self._subset(self.llc, self.urc)
subset = self.grid.subgrid_from_gpis(gpis)
subset.shape = (np.unique(lats).size, np.unique(lons).size)
return subset
def _assert_corners(self):
# check if the corner points are also in the grid
assert self.llc.lon in self.grid.get_grid_points()[1]
assert self.llc.lat in self.grid.get_grid_points()[2]
def as_slice(self, d=False):
"""
Create a lon and lat slice of the Area.
Parameters
---------
d : bool, optional (default: False)
Include step size in slice
Returns
-------
lon_slice : slice
Slice across the area
lat_slice : slice
Slice across the area
"""
return slice(self.llc.lon, self.urc.lon, self.grid.dx if d else None), \
slice(self.llc.lat, self.urc.lat, self.grid.dy if d else None)
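# Illustrative note added for clarity (not part of the original module): a RegularArea is
# built from two corner Points and a grid that contains them, e.g.
#   area = RegularArea(Point(10., 40.), Point(20., 50.), grid)
#   lon_slice, lat_slice = area.as_slice()
# where `grid` is any pygeogrids BasicGrid/CellGrid with regular spacing covering the corners.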
class NcRegGridStack(object):
""" Store netcdf cubes with xarray and dask """
def __init__(self, dx=0.25, dy=0.25, z=None, z_name='z',
llc=Point(-179.875, -89.875), urc=Point(179.875, 89.875),
indexed=True, zlib=True, fill_value=9999.):
"""
Parameters
----------
dx : float, optional (default: 0.25)
Regular spacing in x/lon direction
dy : float, optional (default: 0.25)
Regular spacing in y/lat direction
z : np.array
Z Values, e.g. Timestamps (z dimension of cube)
        z_name : str, optional (default: 'z')
Name of the z dimension (e.g. time or depth)
llc : Point, optional (default: Point(-179.875, -89.875))
Lower left corner point of the dataset area.
urc : Point, optional (default: Point(179.875, 89.875))
Upper right corner point of the dataset area.
indexed : bool, optional (default: True)
Add a 2d variable of unique index to each point of the dataset.
zlib : bool, optional (default: True)
Compress data when writing to netcdf
fill_value : float, optional (default: 9999.)
Fill value nans are replaced with
"""
if z is None:
z = [None]
self.zlib = zlib
self.z_name = z_name
self.fill_value = fill_value
self.llc, self.urc = llc, urc
lons, lats = self._coords(dx, dy)
self.shape = (z.size, lats.size, lons.size)
gpis = self._gpis('ll') # origin is in the lower left
self.ds = xr.Dataset(
data_vars={'gpi': (['lat', 'lon'], gpis)} if indexed else None,
coords={'lon': lons, 'lat': lats, self.z_name: z})
self.grid = to_reg_cell_grid(self._grid(gpis), 5.)
@property
def subset(self):
return (self.llc, self.urc)
def _grid(self, gpis):
# create a pygeogrids object
lons, lats = np.meshgrid(self.ds.lon.values, np.flipud(self.ds.lat.values))
lons, lats = lons.flatten(), lats.flatten()
grid = BasicGrid(lons, lats, gpis=gpis.flatten()).to_cell_grid(5.)
return grid
def _gpis(self, origin='ll'):
"""
Parameters
---------
origin : str, optional (Default: 'll')
            String indicating where gpi=0 is.
ll = lower left, ur=upper right, lr = lower right, ul = upper left
Returns
---------
gpis : np.ndarray
Array of GPIs
"""
origins = ['ll', 'lr', 'ul', 'ur']
if origin not in origins:
raise NotImplementedError(
"Origin {} not implemented. Choose one of: {}"
.format(origin, ','.join(origins)))
n = self.shape[1] * self.shape[2]
gpis = np.arange(n).reshape(self.shape[1], self.shape[2])
if origin[0] == 'l':
gpis = np.flipud(gpis)
if origin[1] == 'r':
gpis = np.fliplr(gpis)
return gpis
def _coords(self, dx, dy):
""" Build coord range with chosen resolution over dataset area """
lons = safe_arange(self.llc.lon, self.urc.lon+dx, dx)
lats = safe_arange(self.llc.lat, self.urc.lat+dy, dy)
self.dx, self.dy = dx, dy
return lons, lats
def _add_empty_3d(self, name):
        # add an empty variable with z dimension of the passed name
#print('Add empty 3D variable {}'.format(name))
self.ds[name] = \
xr.DataArray(np.full(self.shape, self.fill_value),
dims=[self.z_name, 'lat', 'lon'],
coords=[self.ds[self.z_name], self.ds.lat, self.ds.lon])
def _write_img(self, data, **kwargs):
"""
Write area to dataset.
Parameters
----------
data : xr.Dataset, 2d arrays to write, keys are variable names
"""
for var in list(data.data_vars.keys()):
if var not in self.ds.variables:
self._add_empty_3d(var)
self.ds[var].loc[dict(**kwargs)] = data[var]
def _write_ser(self, data, **kwargs):
"""
Write (time) series of multiple variables in data frame
"""
for var in data.keys():
if var not in self.ds.variables:
self._add_empty_3d(var)
assert data[var].size == self.ds[self.z_name].size
dat = data[var]
dat[np.isnan(dat)] = self.fill_value
self.ds[var].loc[dict(**kwargs)] = dat
def _write_pt(self, data, **kwargs):
# takes arrays of lon, lat, z and data dict of arrays
for var in data.keys():
if var not in self.ds.variables:
self._add_empty_3d(var)
dat = data[var]
dat[np.isnan(dat)] = self.fill_value
self.ds[var].loc[dict(**kwargs)] = dat
def store_stack(self, filename=None, global_attrs=None, dtypes=np.float32):
"""
        Write down xarray cube to netcdf file
Parameters
----------
filename : str
Path to the stack file to write
global_attrs : dict, optional (default: None)
Global attributes
dtypes : np.float32
Data types of results, affects compression.
"""
if global_attrs is None:
global_attrs = {}
self.ds = self.ds.assign_attrs(global_attrs)
try:
if self.zlib:
encoding = {}
for var in self.ds.variables:
if var not in ['lat', 'lon', self.z_name]:
encoding[var] = {'complevel': 9, 'zlib': True,
'dtype': dtypes,
'_FillValue': self.fill_value}
else:
encoding = None
self.ds.to_netcdf(filename, engine='netcdf4', encoding=encoding)
        except: # todo: specify exception
warnings.warn('Compression failed, store uncompressed results.')
self.ds.to_netcdf(filename, engine='netcdf4')
self.ds.close()
def store_files(self, path, filename_templ='file_{}.nc',
dtypes=np.float32):
"""
filename_templ :
{} is replaced by the z indicator (strftime(z) if z is a date time).
"""
# todo: add option to append to existing file (memory dump)
# todo: integrate with the other function
if self.zlib:
encoding = {}
for var in self.ds.variables:
if var not in ['lat', 'lon', self.z_name]:
encoding[var] = {'complevel': 9, 'zlib': True,
'dtype': dtypes,
'_FillValue': self.fill_value}
else:
encoding = None
datetime_obs = [np.datetime64, datetime]
for z in self.ds[self.z_name]:
if any([isinstance(z.values, dt) for dt in datetime_obs]):
pydatetime= | pd.to_datetime(z.values) | pandas.to_datetime |
import pandas as pd
import dateutil
import datetime
class api_IEX:
"""A class to work with the IEX API @ api.iextrading.com """
baseURL = "https://api.iextrading.com/1.0/stock/"
dfResponse = "nothing queried"
def __init__(self, ticker):
self.symbol=ticker
self.baseURL = self.baseURL + ticker
def testAPI(self):
""" Basic test function to check if the API is working"""
apiPath = self.baseURL + "/delayed-quote"
self.dfResponse = | pd.read_json(apiPath, typ='series') | pandas.read_json |
import sys
from typing import List, Tuple
import numpy as np
import pandas as pd
def get_valid_gene_info(
genes: List[str],
release=102,
species='homo sapiens'
) -> Tuple[List[str], List[int], List[int], List[int]]:
"""Returns gene locations for all genes in ensembl release 93 --S Markson 3 June 2020
Parameters
----------
genes : A list of genes
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes: List[str] :
Returns
-------
"""
from pyensembl import EnsemblRelease
assembly = EnsemblRelease(release, species=species)
gene_names = []
gene_contigs = []
gene_starts = []
gene_ends = []
for gene in np.intersect1d(genes, [
gene.gene_name for gene in assembly.genes()
if gene.contig.isnumeric() or gene.contig == 'X'
]): # Toss genes not in hg38 release 93
gene_info = assembly.genes_by_name(gene)
gene_info = gene_info[0]
gene_names.append(gene)
gene_contigs.append(gene_info.contig)
gene_starts.append(gene_info.start)
gene_ends.append(gene_info.end)
return gene_names, gene_contigs, gene_starts, gene_ends
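def _get_valid_gene_info_example():
    """Illustrative only (added for clarity, not part of the original module): typical call;
    requires the pyensembl data for the requested release to be installed locally."""
    names, contigs, starts, ends = get_valid_gene_info(['EGFR', 'MYC'], release=102)
    return dict(zip(names, zip(contigs, starts, ends)))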
def seurat_to_loom(seuratrds, patient_id_column, celltype_column,
complexity_column, loomfile):
"""
Parameters
----------
seuratrds :
patient_id_column :
celltype_column :
complexity_column :
loomfile :
Returns
-------
"""
import rpy2.robjects as robjects
from scipy import sparse
from rpy2.robjects import pandas2ri
import loompy
robjects.r('''
library(Seurat)
seurat2rawandmeta <- function(seuratrds) {
seuratobj <- readRDS(seuratrds)
    return(list(genes=rownames(seuratobj@data), metadata=seuratobj@meta.data, data=as.data.frame(summary(seuratobj@data))))
}
''')
seurat_grab = robjects.r['seurat2rawandmeta'](seuratrds)
genes = pd.DataFrame(np.array(seurat_grab.rx2('genes')))
genes.columns = ['gene']
metadata = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('metadata'))
if patient_id_column != 'patient_ID':
metadata['patient_ID'] = metadata[patient_id_column]
metadata.drop(patient_id_column, inplace=True)
if celltype_column != 'cell_type':
metadata['cell_type'] = metadata[celltype_column]
metadata.drop(celltype_column, inplace=True)
if complexity_column != 'complexity':
metadata['complexity'] = metadata[complexity_column]
metadata.drop(complexity_column, inplace=True)
data_df = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('data'))
sparsedata = sparse.coo_matrix(
(data_df['x'], (data_df['i'] - 1, data_df['j'] - 1))).tocsc()
sparsedata.resize((genes.shape[0], metadata.shape[0]))
loompy.create(loomfile, sparsedata, genes.to_dict("list"),
metadata.to_dict("list"))
def intify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
for col in df.columns:
if col.endswith('_ad'):
raise Exception(
"Don't append you column names with _ad! -- Samuel")
df[col] = df[col].apply(
lambda x: int(binascii.hexlify(x.encode()), 16))
while np.sum(df.max() > sys.maxsize) > 0:
for col in df.columns:
if df[col].max() > sys.maxsize:
df[col + '_ad'] = df[col] // sys.maxsize
df[col] = df[col] % sys.maxsize
return df.astype(np.int64)
def deintify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
while np.sum([x.endswith('_ad') for x in df.columns]) > 0:
for col in df.columns:
if col.endswith('_ad') and col + '_ad' not in df.columns:
df[col[0:-3]] = df[col[0:-3]].astype(object)
df[col] = df[col].astype(object)
df[col[0:-3]] = df[col[0:-3]] + sys.maxsize * df[col]
df.drop(col, axis=1, inplace=True)
for col in df.columns:
try:
df[col] = df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode())
except:
print(df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode()))
raise Exception("whoops")
return df
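def _intify_roundtrip_example():
    """Illustrative only (added for clarity, not part of the original module): intify encodes
    string cells as integers (splitting overflow into *_ad columns) and deintify reverses it."""
    df = pd.DataFrame({'cell_type': ['B cell', 'T cell'], 'patient_ID': ['p1', 'p2']})
    roundtrip = deintify(intify(df))
    return roundtrip.equals(df)  # True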
def recover_meta(db, do_deint=False):
"""
Parameters
----------
db :
do_deint :
(Default value = False)
Returns
-------
"""
colmeta = None
for key in db.ca.keys():
if colmeta is None:
colmeta = pd.DataFrame(db.ca[key])
colmeta.columns = [key]
else:
colmeta[key] = db.ca[key]
if do_deint:
colmeta = deintify(colmeta.astype(np.int64))
rowmeta = None
for key in db.ra.keys():
if rowmeta is None:
rowmeta = pd.DataFrame(db.ra[key])
rowmeta.columns = [key]
else:
rowmeta[key] = db.ra[key]
if do_deint:
rowmeta = deintify(rowmeta.astype(np.int64))
return rowmeta, colmeta
def we_can_pickle_it(thing, thingname: str):
"""
Parameters
----------
    thing :
        Object to pickle.
    thingname : str
        Path of the pickle file to write.
Returns
-------
"""
import pickle
with open(thingname, 'wb') as f:
pickle.dump(thing, f, pickle.HIGHEST_PROTOCOL)
def we_can_unpickle_it(thingname: str):
"""
Parameters
----------
    thingname : str
        Path of the pickle file to read.
Returns
-------
"""
import pickle
with open(thingname, 'rb') as f:
thing = pickle.load(f)
return thing
def get_alpha_concave_hull_polygon(xcoords, ycoords, alpha=0.1, buffer=1):
"""Much credit to https://thehumangeo.wordpress.com/2014/05/12/drawing-boundaries-in-python/
Parameters
----------
xcoords :
ycoords :
alpha :
(Default value = 0.1)
buffer :
(Default value = 1)
Returns
-------
"""
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
from scipy.spatial import Delaunay
import numpy as np
import math
def alpha_shape(points, alpha):
"""Compute the alpha shape (concave hull) of a set
of points.
Parameters
----------
points :
Iterable container of points.
alpha :
alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
Returns
-------
"""
if len(points) < 4:
# When you have a triangle, there is no sense
# in computing an alpha shape.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""Add a line between the i-th and j-th points,
if not in the list already
Parameters
----------
edges :
edge_points :
coords :
i :
j :
Returns
-------
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0] - pb[0])**2 + (pa[1] - pb[1])**2)
b = math.sqrt((pb[0] - pc[0])**2 + (pb[1] - pc[1])**2)
c = math.sqrt((pc[0] - pa[0])**2 + (pc[1] - pa[1])**2)
# Semiperimeter of triangle
s = (a + b + c) / 2.0
# Area of triangle by Heron's formula
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
# Here's the radius filter.
#print circum_r
if circum_r < 1.0 / alpha:
add_edge(edges, edge_points, coords, ia, ib)
add_edge(edges, edge_points, coords, ib, ic)
add_edge(edges, edge_points, coords, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
points = []
for x, y in zip(xcoords, ycoords):
points.append(geometry.shape({'type': 'Point', 'coordinates': [x, y]}))
concave_hull, edge_points = alpha_shape(points, alpha=alpha)
return concave_hull.buffer(buffer)
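# Sketch: build a concave hull around a noisy synthetic ring of points; the
# alpha and buffer values below are illustrative only.
def _demo_alpha_concave_hull():
    theta = np.random.uniform(0, 2 * np.pi, 500)
    r = 10 + np.random.normal(0, 0.5, 500)
    return get_alpha_concave_hull_polygon(r * np.cos(theta), r * np.sin(theta), alpha=0.1, buffer=1)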
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
"""
Parameters
----------
xcoords :
ycoords :
nth_neighbor :
(Default value = 10)
quantile :
(Default value = .9)
Returns
-------
"""
from scipy.spatial.distance import pdist, squareform
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
distances = D[np.argsort(D, axis=0)[nth_neighbor - 1, :], 0]
return distances <= np.quantile(distances, quantile)
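# Sketch: keep only points whose distance to their 10th-nearest neighbor is
# below the 90th percentile (defaults above); the 2-D embedding is synthetic.
def _demo_outlier_removal():
    xy = np.random.normal(0, 1, size=(1000, 2))
    mask = get_outlier_removal_mask(xy[:, 0], xy[:, 1], nth_neighbor=10, quantile=.9)
    return xy[mask]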
def cohensd(g1, g2):
"""
Returns Cohen's D for the effect size of group 1 values (g1) over group 2 values (g2).
Parameters
----------
g1 : group 1 values (list or numpy vector)
g2 : group 2 values (list or numpy vector)
Returns
-------
(mean(g1) - mean(g2) )/s, where s is the pooled standard deviation of the two groups with Bessel's correction
"""
n1 = len(g1)
n2 = len(g2)
s1 = np.std(g1, ddof=1)
s2 = np.std(g2, ddof=1)
s = np.sqrt(((n1 - 1) * s1 * s1 + (n2 - 1) * s2 * s2) / (n1 + n2 - 2))
return (np.mean(g1) - np.mean(g2)) / s
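# Sketch: Cohen's d on two synthetic samples; a positive value means g1 has
# the larger mean, expressed in units of the pooled standard deviation.
def _demo_cohensd():
    g1 = np.random.normal(0.5, 1.0, 200)
    g2 = np.random.normal(0.0, 1.0, 250)
    return cohensd(g1, g2)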
def phi_coefficient(contingency_table):
"""
Returns the phi-coefficient for a contingency table.
Paramenters
-----------
contingency_table : contingency table, identical in format to scipy.stats.fisher_exact
Returns
-------
phi coefficient
"""
table1 = contingency_table[0]
table2 = contingency_table[1]
table = np.vstack([table1, table2])
phitop = (table1[0] * table2[1] - table1[1] * table2[0])
phibottom = np.sqrt((table2[1]+table2[0])*\
(table1[1]+table1[0])*\
(table1[0]+table2[0])*\
(table2[1]+table1[1]))
phi = phitop / phibottom
return phi
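# Sketch: phi coefficient for a 2x2 contingency table laid out exactly as
# scipy.stats.fisher_exact expects ([[a, b], [c, d]]); the counts are made up.
def _demo_phi_coefficient():
    table = [[20, 5], [10, 30]]
    return phi_coefficient(table)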
def get_igraph_from_adjacency(adjacency, directed=None):
"""This is taken from scanpy._utils.__init__.py as of 12 August 2021
Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except KeyError:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.')
return g
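# Sketch: wrap a small scipy sparse adjacency matrix as an igraph graph
# (requires python-igraph); the matrix below is illustrative.
def _demo_igraph_from_adjacency():
    from scipy.sparse import csr_matrix
    adjacency = csr_matrix(np.array([[0, 1, 0],
                                     [1, 0, 2],
                                     [0, 2, 0]], dtype=float))
    return get_igraph_from_adjacency(adjacency, directed=False)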
def convert_10x_h5(path_10x_h5,
output_file,
labelkey=None,
label='',
genes_as_ca=[],
gene_whitelist=None,
output_type='loom'):
import cellranger.matrix as cr_matrix
import loompy
output_type = output_file.split('.')[-1]
if output_type not in ['loom', 'pkl']:
raise Exception(
"output_file must be have suffix loom or pkl, denoting an output type of loom of pickle respectively"
)
filtered_feature_bc_matrix = cr_matrix.CountMatrix.load_h5_file(
path_10x_h5)
id2feature = {
val: key
for key, val in filtered_feature_bc_matrix.feature_ids_map.items()
}
features = [
id2feature[x].decode("utf-8")
for x in range(filtered_feature_bc_matrix.features_dim)
]
features_common_names = filtered_feature_bc_matrix.feature_ref.get_feature_names(
)
barcodes = filtered_feature_bc_matrix.bcs.astype(str)
ca = {'cellname': barcodes}
if labelkey is not None:
ca[labelkey] = [label] * len(barcodes)
m = filtered_feature_bc_matrix.m
if gene_whitelist is not None:
if len(gene_whitelist) > 0:
mask = np.isin(features, gene_whitelist)
m = m[mask, :]
features = list(np.array(features)[mask])
features_common_names = list(np.array(features_common_names)[mask])
if type(genes_as_ca) == str:
genes_as_ca = [genes_as_ca]
else:
genes_as_ca = list(genes_as_ca)
if len(genes_as_ca) > 0:
mask = np.isin(features, genes_as_ca)
if len(genes_as_ca) != mask.sum():
raise Exception(
"Improper mapping of row attributes; perhaps gene of interest not in loom.ra[\'gene\']?"
)
for gene in genes_as_ca:
submask = np.array(features) == gene
if np.sum(submask) > 1:
raise Exception("Two or more features with this name")
elif np.sum(submask) == 0:
raise Exception("No features with this name")
ca[gene] = list(m[submask, :].toarray()[0])
m = m[~mask, :]
features = list(np.array(features)[~mask])
features_common_names = list(np.array(features_common_names)[~mask])
ra = {'gene': features, 'gene_common_name': features_common_names}
if output_type == 'loom':
loompy.create(output_file, m, ra, ca)
if output_type == 'pkl':
if gene_whitelist is None:
raise Exception(
"pkl output intended only for saving a small subsetted geneset of interest. Please select a whitelist before saving as dataframe pkl."
)
mask = np.isin(features, gene_whitelist)
features = np.array(features)[mask]
features_common_names = np.array(features_common_names)[mask]
df = pd.DataFrame(m[mask, :].toarray())
df.index = features
if labelkey is not None:
df.columns = [labelkey + '_' + x for x in barcodes]
else:
df.columns = barcodes
df.to_pickle(output_file)
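# Usage sketch: convert a 10x filtered_feature_bc_matrix.h5 into a loom file,
# pulling one gene out as a per-cell column attribute. The paths, label and
# gene symbol are hypothetical; the cellranger python package must be importable.
def _demo_convert_10x_h5(h5_path="sample1/filtered_feature_bc_matrix.h5"):
    convert_10x_h5(h5_path,
                   output_file="sample1.loom",
                   labelkey="sample",
                   label="sample1",
                   genes_as_ca=["CD3E"])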
def create_split_exon_gtf(input_gtf, output_gtf, gene):
gtf = pd.read_table(input_gtf, header=None, comment='#')
gtf.columns = [
'seqname', 'source', 'feature', 'start', 'end', 'score', 'strand',
'frame', 'attribute'
]
gtf = gtf[gtf['feature'] == 'exon']
if type(gene) == str:
mask = gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(gene) in x)
elif type(gene) in [list, tuple, np.array]:
mask = np.array([False] * len(gtf))
for g in gene:
mask = mask | gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(g) in x)
gtf_unchanged = gtf[~mask]
gtf_changed = gtf[mask]
def append_exon_number_to_id_and_name(attribute):
exon_number = attribute.split('exon_number')[1].split(';')[0].split(
'\"')[-2]
old_gene_id_str = 'gene_id' + attribute.split('gene_id')[1].split(
';')[0]
new_gene_id_str = '\"'.join(
old_gene_id_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_id_str, new_gene_id_str)
old_gene_name_str = 'gene_name' + attribute.split(
'gene_name')[1].split(';')[0]
new_gene_name_str = '\"'.join(
old_gene_name_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_name_str, new_gene_name_str)
old_transcript_id_str = 'transcript_id' + attribute.split(
'transcript_id')[1].split(';')[0]
new_transcript_id_str = '\"'.join(
old_transcript_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_id_str,
new_transcript_id_str)
old_transcript_name_str = 'transcript_name' + attribute.split(
'transcript_name')[1].split(';')[0]
new_transcript_name_str = '\"'.join(
old_transcript_name_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_name_str,
new_transcript_name_str)
if 'ccds_id' in attribute:
old_ccds_id_str = 'ccds_id' + attribute.split('ccds_id')[1].split(
';')[0]
new_ccds_id_str = '\"'.join(old_ccds_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_ccds_id_str, new_ccds_id_str)
return attribute
gtf_changed['attribute'] = gtf_changed['attribute'].apply(
append_exon_number_to_id_and_name)
gtf = pd.concat([gtf_changed, gtf_unchanged])
gtf.to_csv(output_gtf, sep='\t', index=False, header=None)
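# Usage sketch: write a GTF in which every exon of a chosen gene becomes its
# own gene/transcript entry so exons can be counted separately. The file names
# and gene symbol are hypothetical.
def _demo_split_exon_gtf(input_gtf="genes.gtf"):
    create_split_exon_gtf(input_gtf, "genes_split_PTPRC.gtf", gene="PTPRC")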
def get_umap_from_matrix(X,
random_state=17,
verbose=True,
min_dist=0.001,
n_neighbors=20,
metric='correlation'):
import umap
reducer = umap.UMAP(random_state=random_state,
verbose=verbose,
min_dist=min_dist,
n_neighbors=n_neighbors,
metric=metric)
return reducer.fit_transform(X)
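# Sketch: embed a random matrix with the defaults above (requires umap-learn);
# the input data here is synthetic.
def _demo_umap():
    X = np.random.normal(size=(300, 50))
    return get_umap_from_matrix(X, verbose=False)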
def convert_h5ad(h5ad,
output_loom,
convert_obsm=True,
convert_varm=True,
convert_uns=True,
convert_layers=True):
import scanpy
import loompy
h5ad = scanpy.read_h5ad(h5ad)
ra = {'gene': np.array(h5ad.var.index)}
for col in h5ad.var.columns:
if col == 'gene':
raise Exception(
"var column of h5ad is \"gene\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ra[col] = np.array(h5ad.var[col].values)
ca = {'cellname': np.array(h5ad.obs.index)}
for col in h5ad.obs.columns:
if col == 'cellname':
raise Exception(
"obs column of h5ad is \"cellname\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ca[col] = np.array(h5ad.obs[col].values)
if convert_obsm:
for obsm_key in h5ad.obsm.keys():
for i in range(h5ad.obsm[obsm_key].shape[1]):
ca_key = "{}_{}".format(
obsm_key,
i + 1) # one added so that these are 1-indexed by default
if ca_key in ca.keys():
raise Exception(
"key\"{}\" already present as column attribute key. Please rename to avoid."
)
else:
ca[ca_key] = h5ad.obsm[obsm_key][:, i]
if convert_varm:
for varm_key in h5ad.varm.keys():
for i in range(h5ad.varm[varm_key].shape[1]):
ra_key = "{}_{}".format(
varm_key,
i + 1) # one added so that these are 1-indexed by default
if ra_key in ra.keys():
raise Exception(
"key\"{}\" already present as row attribute key. Please rename to avoid."
)
else:
ra[ra_key] = h5ad.varm[varm_key][:, i]
loompy.create(output_loom, h5ad.X.T, ra, ca)
if convert_uns:
loom = loompy.connect(output_loom)
for uns_key in h5ad.uns.keys():
loom.attrs[uns_key] = h5ad.uns[uns_key]
loom.close()
if convert_layers:
loom = loompy.connect(output_loom)
for layer_key in h5ad.layers.keys():
            loom.layers[layer_key] = h5ad.layers[layer_key].T
loom.close()
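# Usage sketch: convert an AnnData .h5ad file to loom, carrying obsm/varm
# matrices over as 1-indexed column/row attributes. The file names are hypothetical.
def _demo_convert_h5ad(h5ad_path="pbmc.h5ad"):
    convert_h5ad(h5ad_path, "pbmc.loom", convert_obsm=True, convert_varm=True)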
def get_UMI_curve_from_10x_h5(path_10x_h5, save_to_file=None):
import cellranger.matrix as cr_matrix
import matplotlib.pyplot as plt
bc_matrix = cr_matrix.CountMatrix.load_h5_file(path_10x_h5)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(np.sort(bc_matrix.get_counts_per_bc())[::-1])
ax.set_title('UMI counts per barcode, sorted')
ax.set_ylabel('UMI counts')
ax.set_xlabel('cell rank, UMI counts (most to fewest)')
ax.set_xscale('log')
ax.set_yscale('log')
if save_to_file is None:
plt.show()
else:
plt.savefig(save_to_file)
plt.cla()
def get_dsb_normalization(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=True,
denoise_counts=True,
isotype_control_name_vec=None,
define_pseudocount=False,
pseudocount_use=10,
quantile_clipping=False,
quantile_clip=[0.001, 0.9995],
return_stats=False):
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
if isotype_control_name_vec is None:
isotype_control_name_vec = robjects.r("NULL")
if (pseudocount_use != 10) and (not define_pseudocount):
raise Exception(
"\"define_pseudocount\" must be set to True to use pseudocount_use"
)
rpy2.robjects.numpy2ri.activate()
robjects.r('''
library(mclust)
library(dsb)
dsb <- function(cells,
empty,
use.isotype.control=TRUE,
denoise.counts=TRUE,
isotype.control.name.vec = NULL,
define.pseudocount = FALSE,
pseudocount.use = 10,
quantile.clipping = FALSE,
quantile.clip = c(0.001, 0.9995),
return.stats = FALSE){
DSBNormalizeProtein(cells, empty, use.isotype.control=use.isotype.control,
isotype.control.name.vec = isotype.control.name.vec,
denoise.counts=denoise.counts,
define.pseudocount = define.pseudocount,
pseudocount.use = pseudocount.use,
quantile.clipping = quantile.clipping,
quantile.clip = quantile.clip,
return.stats = return.stats)
}
''')
dsb = robjects.r['dsb']
return dsb(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=use_isotype_control,
denoise_counts=denoise_counts,
isotype_control_name_vec=isotype_control_name_vec,
define_pseudocount=define_pseudocount,
pseudocount_use=pseudocount_use,
quantile_clipping=quantile_clipping,
quantile_clip=quantile_clip,
return_stats=return_stats)
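# Usage sketch: denoise-and-scale ADT counts against empty droplets through the
# R 'dsb' package (requires rpy2 plus the R packages 'dsb' and 'mclust'). The
# two arguments are hypothetical antibody-by-cell count matrices.
def _demo_dsb(cell_adt_counts, empty_droplet_adt_counts):
    return get_dsb_normalization(cell_adt_counts,
                                 empty_droplet_adt_counts,
                                 use_isotype_control=False,
                                 denoise_counts=True)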
def get_cellphonedb_compatible_counts_and_meta(loom,
layername,
celltype_ca,
gene_ra='gene',
cellname_ca='cellname',
return_df=False,
output_prefix=None,
mouse_to_human=False):
if output_prefix is None and not return_df:
raise Exception(
"either output_prefix must be specified, or return_df must be True"
)
counts = pd.DataFrame(loom[layername][:, :])
counts.columns = loom.ca[cellname_ca]
#counts.insert(0, 'Gene', np.array([x.upper() for x in loom.ra[gene_ra]]))
genes = loom.ra[gene_ra]
if mouse_to_human:
from pybiomart import Server
server = Server(host="http://www.ensembl.org")
mouse_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['mmusculus_gene_ensembl'])
mouse_data = mouse_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
mouse_data['Gene upper'] = mouse_data['Gene name'].apply(
lambda x: str(x).upper())
human_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['hsapiens_gene_ensembl'])
human_data = human_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
conversion_dict = pd.merge(
mouse_data, human_data, left_on='Gene upper',
right_on='Gene name').set_index(
'Gene stable ID_x')['Gene stable ID_y'].to_dict()
convertible_mask = np.array(
[x in conversion_dict.keys() for x in genes])
genes = [
conversion_dict[x] if x in conversion_dict.keys() else np.nan
for x in genes
]
counts.insert(0, 'Gene', genes)
if mouse_to_human:
counts = counts.iloc[convertible_mask, :]
counts = counts.groupby('Gene').first().reset_index()
meta = pd.DataFrame(loom.ca[cellname_ca])
meta.columns = ['Cell']
meta['cell_type'] = loom.ca[celltype_ca]
if output_prefix is not None:
counts.to_csv(output_prefix + '_counts.txt', sep='\t', index=False)
meta.to_csv(output_prefix + '_meta.txt', sep='\t', index=False)
command = 'cellphonedb method statistical_analysis {0}_meta.txt {0}_counts.txt'.format(
output_prefix)
print("Run cellphonedb on command line with \"{}\"".format(command))
elif return_df:
return meta, counts
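# Usage sketch: write cellphonedb-ready counts/meta text files from a loom file
# and print the suggested command line. The loom path, layer name and column
# attribute below are hypothetical.
def _demo_cellphonedb_export(loom_path="tumor.loom"):
    import loompy
    with loompy.connect(loom_path) as loom:
        get_cellphonedb_compatible_counts_and_meta(loom,
                                                   layername="counts",
                                                   celltype_ca="cell_type",
                                                   output_prefix="tumor")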
def create_gsea_txt_and_cls(loom,
layername,
output_prefix,
phenotypes,
cellmask=None,
gene_ra='gene',
cellname_ca='cellname'):
import os
if cellmask is None:
cellmask = np.array([True] * loom.shape[1])
if type(phenotypes) == str:
phenotypes = loom.ca[phenotypes]
if len(phenotypes) != cellmask.sum():
raise Exception(
"length of phenotypes vector must be equal to number of samples (cells)"
)
txt = pd.DataFrame(loom.ra[gene_ra])
txt.columns = ['NAME']
txt['DESCRIPTION'] = 'na'
#txt = pd.concat([txt,pd.DataFrame(loom[layername][:,cellmask])],axis=1)
#txt.columns = ['NAME','DESCRIPTION'] + list(loom.ca[cellname_ca][cellmask])
#txt.to_csv(output_prefix+'.txt',index=False,sep='\t')
total = cellmask.sum()
nphenotypes = len(np.unique(phenotypes))
outcls = output_prefix + '.cls'
if os.path.exists(outcls):
os.system("rm {}".format(outcls))
#raise Exception("cls file already present--cannot overwrite")
line1 = "{} {} 1".format(total, nphenotypes)
line2 = '# ' + ' '.join(np.unique(phenotypes))
phenotype2index = {
phenotype: i
for i, phenotype in enumerate(np.unique(phenotypes))
}
#print(phenotype2index)
#print([phenotype2index[x] for x in phenotypes])
line3 = ' '.join([str(phenotype2index[x]) for x in phenotypes])
for line in [line1, line2, line3]:
os.system('echo \"{}\">>{}'.format(line, outcls))
def get_cross_column_attribute_heatmap(loom,
ca1,
ca2,
normalization_axis=None):
#if type(normalization_axis) == list:
# outdfs = []
# for axis in normalization_axis:
# outdfs.append(get_cross_column_attribute_heatmap(loom, ca1, ca2, normalization_axis=axis))
# return outdfs
    df = pd.DataFrame(loom.ca[ca1], copy=True)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import boto3
from tqdm import tqdm
import yaml
from ._01_ETL import Boba_ETL as etl
from ._02_Preprocessing import Boba_Preprocessing as pp
from ._03_Modeling import Boba_Modeling as m
class BobaModeling(etl,pp,m):
def __init__(self, year, position_group):
self.s3_client = boto3.client('s3')
self.bucket = "boba-voglewede"
self.year = year
self.information_cols = ['Season','Name','playerID','position','Team','Age']
self.position_group = position_group
if self.position_group=='hitters':
self.per_metric = 'PA'
self.counting_stats = ['HR','R','RBI','WAR','SB','CS']
self.rate_stats = ['AVG','OBP','SLG','BABIP','BB%','K%','wOBA']
self.model_targets = self.rate_stats + [c+'_per_'+self.per_metric for c in self.counting_stats]
self.pt_metric = 'PA'
elif self.position_group=='SP':
self.per_metric = 'GS'
self.counting_stats = ['ShO','CG','W','WAR']
self.rate_stats = ['ERA','BB_per_9','K_per_9','OBP','SLG']
self.model_targets = self.rate_stats + [c+'_per_'+self.per_metric for c in self.counting_stats]
self.pt_metric = 'IP'
elif self.position_group=='RP':
self.per_metric = 'G'
self.counting_stats = ['SV','HLD','WAR']
self.rate_stats = ['ERA','BB_per_9','K_per_9','OBP','SLG']
self.model_targets = self.rate_stats + [c+'_per_'+self.per_metric for c in self.counting_stats]
self.pt_metric = 'IP'
else:
pass
def __repr__(self):
return "This is the way"
def scrape_raw_season_data(self, source, start_year,end_year, writeS3=False):
seasons = list(np.arange(start_year,end_year+1))
statcast_seasons = list(np.arange(2015,end_year+1))
data_group = 'hitters' if self.position_group == 'hitters' else 'pitchers'
if data_group == 'hitters':
print("gather data for {} through {} seasonal hitting data".format(start_year,end_year))
for i in tqdm(seasons):
etl.FG_hitters_season(self,season=i,writeS3=writeS3)
print("Fangraphs scrape completed")
for i in tqdm(statcast_seasons):
etl.statcast_hitters_season(self,season=i,writeS3=writeS3)
print("Statcast scrape completed")
elif data_group == 'pitchers':
print("gather data for {} through {} seasonal pitching data".format(start_year,end_year))
for i in tqdm(seasons):
etl.FG_pitchers_season(self,season=i,writeS3=writeS3)
print("Fangraphs scrape completed")
for i in tqdm(statcast_seasons):
etl.statcast_pitchers_season(self,season=i,writeS3=writeS3)
print("Statcast scrape completed")
else:
pass
def prepare_source_masters(self, writeS3 = False):
data_group = 'hitters' if self.position_group == 'hitters' else 'pitchers'
if data_group == 'hitters':
etl.gather_source_masters(self,position_group=self.position_group, source = 'fangraphs', writeS3 = False)
etl.gather_source_masters(self,position_group=self.position_group, source = 'statcast', writeS3 = False)
elif data_group == 'pitchers':
etl.gather_source_masters(self,position_group=self.position_group, source = 'fangraphs', writeS3 = False)
etl.gather_source_masters(self,position_group=self.position_group, source = 'statcast', writeS3 = False)
else:
pass
def create_master_data(self,start_year=2014):
self.start_year = start_year
with open(r'boba/recipes/preprocessing_parameters.yaml') as file:
yaml_data = yaml.load(file, Loader=yaml.FullLoader)
self.pt_min = yaml_data['parameters'][self.position_group]['pt_min']
self.pt_keep_thres = yaml_data['parameters'][self.position_group]['pt_keep_thres']
self.pt_drop = yaml_data['parameters'][self.position_group]['pt_drop']
fg_df,statcast_df,id_map,fantrax = etl.load_raw_dataframes(self)
master_df = pp.join_tables(self,fg_df,statcast_df,id_map,fantrax)
master_df = pp.preliminary_col_reduction(self,master_df)
master_df = pp.feature_engineering(self,master_df)
master_df = pp.drop_out_of_position(self,master_df = master_df)
master_df = pp.limit_years(self,start_year=start_year,master_df=master_df)
master_df = pp.make_targets(self,master_df=master_df)
master_df = pp.organize_training_columns(self,master_df=master_df)
master_df = master_df.reset_index(drop = True)
master_df.to_csv('data/processed/'+self.position_group+'/master_df.csv',index=True)
modeling_df = pp.remove_injured(self,master_df=master_df)
modeling_df = pp.remove_missing(self,modeling_df=modeling_df)
modeling_df.to_csv('data/processed/'+self.position_group+'/modeling_df.csv',index=True)
pp.run_corr_analysis(self,modeling_df)
return master_df, modeling_df
def modeling_pipeline(self, target, knn = 5,test_size = .3, max_evals = 100, seed = 8,verbose=False, split_method='timeseries'):
modeling_df = pd.read_csv('data/processed/'+self.position_group+'/modeling_df.csv',index_col=0)
self.seed = seed
self.knn = knn
model_df = m.isolate_relevant_columns(self,modeling_df = modeling_df,target = target)
self.X_train, self.X_test, self.y_train, self.y_test = m.evaluation_split(self,model_df=model_df,target=target,test_size=test_size,method=split_method)
self.X_train_prod, self.X_test_prod, self.y_train_prod, self.y_test_prod = m.production_split(self,model_df=model_df,target=target,test_size=test_size,method=split_method)
self.X_train, self.X_test = m.preprocessing_pipeline(self, X_train = self.X_train, X_test = self.X_test, target = target, prod=False)
self.model_eval = m.build_model(self,X_train=self.X_train, X_test=self.X_test,y_train=self.y_train, y_test=self.y_test,target=target,prod=False,max_evals=max_evals,verbose=verbose)
self.X_train_prod, self.X_test_prod = m.preprocessing_pipeline(self, X_train = self.X_train_prod, X_test = self.X_test_prod, target = target, prod=True)
self.model_prod = m.build_model(self,X_train=self.X_train, X_test=self.X_test,y_train=self.y_train, y_test=self.y_test,target=target,prod=True,max_evals=max_evals,verbose=verbose)
def prod_scoring_pipeline(self):
try:
os.remove('data/projections/'+self.position_group+'_'+str(self.year)+'_raw.csv')
except:
pass
fg_df,statcast_df,id_map,fantrax = etl.load_raw_dataframes(self)
pp.create_scoring_data(self,fg_df,statcast_df,id_map,fantrax)
for target in tqdm(self.model_targets):
m.generate_prod_predictions(self, target=target)
scored_df = pd.read_csv('data/projections/'+self.position_group+'_'+str(self.year)+'_raw.csv',index_col=0)
# scored_df = scored_df.drop(['Name_zips'],axis=1,errors='ignore')
scored_df = self.clean_projections(scored_df)
scored_df.to_csv('data/projections/'+self.position_group+'_'+str(self.year)+'_raw.csv')
return scored_df
def clean_projections(self,scored_df):
systems = ['Boba','zips','stmr','atc','bat']
if self.position_group == 'hitters':
for i in tqdm(systems):
scored_df['NetSB_'+i] = scored_df['SB_'+i] - scored_df['CS_'+i]
elif self.position_group == 'SP':
for i in tqdm(systems):
scored_df['QS_'+i] = ((scored_df['IP_zips']/(12*6.15)-0.11*scored_df['ERA_'+i]))*scored_df['GS_zips']
# scored_df['QS_'+i] = (scored_df['GS_zips']*(.465-(scored_df['ERA_'+i]*.0872381))+(scored_df['IP_zips']/scored_df['GS_zips'])*.0746775)
scored_df['QSCGSHO_'+i] = scored_df['QS_'+i]+scored_df['CG_Boba']+scored_df['ShO_Boba']
scored_df['OPS_Boba'] = scored_df['SLG_Boba']+scored_df['OBP_Boba']
elif self.position_group == 'RP':
scored_df['SVHLD_Boba'] = scored_df['SV_Boba']+scored_df['HLD_Boba']
scored_df['SVHLD_atc'] = scored_df['SV_atc']+scored_df['HLD_Boba']
scored_df['SVHLD_stmr'] = scored_df['SV_stmr']+scored_df['HLD_Boba']
scored_df['SVHLD_bat'] = np.nan
scored_df['SVHLD_zips'] = np.nan
scored_df['OPS_Boba'] = scored_df['SLG_Boba']+scored_df['OBP_Boba']
else:
pass
return scored_df
class BobaProjections(BobaModeling):
def __init__(self, year, remove_list):
self.remove_list = remove_list
self.b_H = BobaModeling(year=year,position_group='hitters')
self.b_SP = BobaModeling(year=year,position_group='SP')
self.b_RP = BobaModeling(year=year,position_group='RP')
self.proj_H = pd.read_csv('data/projections/hitters_2020_raw.csv',index_col=0)
self.proj_SP = pd.read_csv('data/projections/SP_2020_raw.csv',index_col=0)
self.proj_RP = pd.read_csv('data/projections/RP_2020_raw.csv',index_col=0)
self.stat_cat_H_default = ['OBP','SLG','HR','R','RBI','NetSB']
self.stat_cat_P_default = ['ERA','OPS','K_per_9','BB_per_9','QSCGSHO','SVHLD']
self.display_cols_H = self.stat_cat_H_default + ['OPS_H','BB%_Boba','K%_Boba']
self.display_cols_P = self.stat_cat_P_default.copy()
def remove_injured_and_optouts(self, df, position_group):
if position_group == 'hitters':
for i in self.remove_list:
if i in list(df['Name']):
df.loc[df['Name'] == i, 'PA_zips'] = 0
df.loc[df['Name'] == i, 'HR'] = 0
df.loc[df['Name'] == i, 'RBI'] = 0
df.loc[df['Name'] == i, 'R'] = 0
elif position_group == 'pitchers':
for i in self.remove_list:
if i in list(df['Name']):
df.loc[df['Name'] == i, 'IP_zips'] = 0
df.loc[df['Name'] == i, 'GS_zips'] = 0
df.loc[df['Name'] == i, 'QSCGSHO'] = 0
df.loc[df['Name'] == i, 'G_zips'] = 0
df.loc[df['Name'] == i, 'SVHLD'] = 0
df.loc[df['Name'] == i, 'K_per_9'] = 0
df.loc[df['Name'] == i, 'ERA'] = 5
else:
pass
return df
def create_league(self, stat_categories_H, stat_categories_P, n_teams=12, catcher=1, first=1, second=1, third=1, ss=1, outfield=3, utility=2, off_bench = 5, startingP = 7, reliefP = 6):
self.stat_categories_H = stat_categories_H
self.stat_categories_P = stat_categories_P
self.n_teams = n_teams
self.tot_catcher = n_teams*catcher
self.tot_first = n_teams*first
self.tot_second = n_teams*second
self.tot_third = n_teams*third
self.tot_ss = n_teams*ss
self.tot_outfield = n_teams*outfield
self.tot_utility = n_teams*utility
self.tot_off_bench = n_teams*off_bench
self.total_offense = self.tot_catcher + self.tot_first + self.tot_second + self.tot_third + self.tot_ss + self.tot_outfield + self.tot_utility
self.tot_startingP = n_teams*startingP
self.tot_reliefP = n_teams*reliefP
self.total_roster = self.total_offense + self.tot_startingP + self.tot_reliefP
def set_model_weights(self, trust= True, weights=None):
if trust:
self.w_H_Boba = .8
self.w_H_zips = .05
self.w_H_stmr = .05
self.w_H_atc = .05
self.w_H_bat = .05
self.w_SP_Boba = .8
self.w_SP_zips = .05
self.w_SP_stmr = .05
self.w_SP_atc = .05
self.w_SP_bat = .05
self.w_RP_Boba = .8
self.w_RP_zips = .05
self.w_RP_stmr = .05
self.w_RP_atc = .05
self.w_RP_bat = .05
else:
self.w_H_Boba = weights['hitters']['Boba']
self.w_H_zips = weights['hitters']['zips']
self.w_H_stmr = weights['hitters']['steamer']
self.w_H_atc = weights['hitters']['atc']
self.w_H_bat = weights['hitters']['bat']
self.w_SP_Boba = weights['SP']['Boba']
self.w_SP_zips = weights['SP']['zips']
self.w_SP_stmr = weights['SP']['steamer']
self.w_SP_atc = weights['SP']['atc']
self.w_SP_bat = weights['SP']['bat']
self.w_RP_Boba = weights['RP']['Boba']
self.w_RP_zips = weights['RP']['zips']
self.w_RP_stmr = weights['RP']['steamer']
self.w_RP_atc = weights['RP']['atc']
self.w_RP_bat = weights['RP']['bat']
def set_custom_parameters(self, value_NetSB = True, versatility_premium = 0, utility_discount = 0, adjust_PA = False, adjust_PA_exceptC = False):
self.value_NetSB = value_NetSB
self.versatility_premium = versatility_premium
self.utility_discount = utility_discount
if adjust_PA:
self.PA_adj = 630
else:
self.PA_adj = 0
if adjust_PA_exceptC:
self.PA_adj_nonC = 630
else:
self.PA_adj_nonC = 0
def generate_projections(self):
print("TBD")
def system_comparison(self):
print("TBD")
def compile_hitters(self):
fantrax = pd.read_csv('data/utils/fantrax.csv')
fantrax = fantrax[['Player','Position','ADP']]
id_map = self.load_ID_map()
        join_df = pd.merge(id_map, fantrax, how='left', left_on='FANTRAXNAME', right_on='Player')
import os
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import json
# import sys
# sys.path.insert(0, './data')
# sys.path.insert(0, './utils')
# sys.path.insert(0, './common')
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from utils.visualization import *
from utils.skeleton import Skeleton
from common.mmm import parse_motions
from common.transforms3dbatch import *
from common.quaternion import *
from renderUtils import quat2xyz
from model.model import Integrator
import torch
import pickle as pkl
import scipy.ndimage.filters as filters
import pdb
## permute joints to make it a DAG
def permute(parents, root=0, new_parent=-1, new_joints=[], new_parents=[]):
new_joints.append(root)
new_parents.append(new_parent)
new_parent = len(new_joints) - 1
for idx, p in enumerate(parents):
if p == root:
permute(parents, root=idx, new_parent=new_parent, new_joints=new_joints, new_parents=new_parents)
return new_joints, new_parents
def softmax(x, **kw):
softness = kw.pop('softness', 1.0)
maxi, mini = np.max(x, **kw), np.min(x, **kw)
return maxi + np.log(softness + np.exp(mini - maxi))
def softmin(x, **kw):
return -softmax(-x, **kw)
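# Sketch: soft maximum/minimum along an axis; as softness approaches 0 these
# approach the hard max/min of x. Values below are illustrative.
def _demo_softmax_softmin():
    x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    return softmax(x, axis=1), softmin(x, axis=1)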
class RawData():
def __init__(self):
pass
def _get_f(self):
raise NotImplementedError
def _get_df(self):
raise NotImplementedError
def preProcess(self):
raise NotImplementedError
def get_skeletonNpermutation(self):
raise NotImplementedError
@property
def quat_columns(self):
## quaternion columns
quat_columns = ['root_tx', 'root_ty', 'root_tz']
for joint in self.skel.joints:
quat_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rw', 'rx', 'ry', 'rz']]
return quat_columns
@property
def fke_columns(self):
## forward kinematics columns
fke_columns = []
for joint in self.skel.joints:
fke_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['tx', 'ty', 'tz']]
return fke_columns
@property
def pose_columns(self):
pose_columns = []
for joint in self.skel.joints:
pose_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rx', 'ry', 'rz']]
return pose_columns
@property
def rifke_columns(self):
## Save Rotation invariant fke (rifke)
rifke_columns = self.fke_columns + ['root_Vx', 'root_Vz', 'root_Ry', 'feet_l1', 'feet_l2', 'feet_r1', 'feet_r2']
return rifke_columns
@property
def rifke_dict(self):
raise NotImplementedError
def output_columns(self, feats_kind):
if feats_kind in {'euler'}:
return self.pose_columns
elif feats_kind in {'quaternion'}:
return self.quat_columns
elif feats_kind in {'fke'}:
return self.fke_columns
elif feats_kind in {'rifke'}:
return self.rifke_columns
def mat2csv(self, data, filename, columns):
pd.DataFrame(data=data, columns=columns).to_csv(filename)
def quat2fke(self, df_quat, filename_fke, filename_rifke):
'''Save Forward Kinematics'''
df_fke = pd.DataFrame(data=np.zeros((df_quat.shape[0], len(self.fke_columns))), columns=self.fke_columns)
## copying translation as is
df_fke[['root_tx', 'root_ty', 'root_tz']] = df_quat.loc[:, ['root_tx', 'root_ty', 'root_tz']].copy()
xyz_data = quat2xyz(df_quat, self.skel)
df_fke.loc[:, self.fke_columns] = xyz_data.reshape(-1, np.prod(xyz_data.shape[1:]))
#filename_fke = dir_name / Path(row[feats_kind]).relative_to(Path(path2data)/'subjects').with_suffix('.fke')
os.makedirs(filename_fke.parent, exist_ok=True)
df_fke.to_csv(filename_fke.as_posix())
'''Save Rotation Invariant Forward Kinematics'''
df_rifke = pd.DataFrame(data=np.zeros((df_quat.shape[0]-1, len(self.rifke_columns))), columns=self.rifke_columns)
rifke_data = self.fke2rifke(xyz_data.copy())
df_rifke[self.rifke_columns] = rifke_data[..., 3:]
#filename_rifke = dir_name / Path(row[feats_kind]).relative_to(Path(path2data)/'subjects').with_suffix('.rifke')
os.makedirs(filename_rifke.parent, exist_ok=True)
df_rifke.to_csv(filename_rifke.as_posix())
''' Convert rifke to fke to get comparable ground truths '''
new_df_fke = pd.DataFrame(data=self.rifke2fke(df_rifke[self.rifke_columns].values, filename_rifke).reshape(-1, len(self.fke_columns)),
columns=self.fke_columns)
new_fke_dir = filename_fke.parent/'new_fke'
os.makedirs(new_fke_dir, exist_ok=True)
new_df_fke.to_csv((new_fke_dir/filename_fke.name).as_posix())
return xyz_data
## fke to rotation invariant fke (Holden et. al.)
def fke2rifke(self, positions):
""" Put on Floor """
#fid_l, fid_r = np.array([5,6]), np.array([10,11])
fid_l, fid_r = self.rifke_dict['fid_l'], self.rifke_dict['fid_r']
foot_heights = np.minimum(positions[:,fid_l,1], positions[:,fid_r,1]).min(axis=1)
floor_height = softmin(foot_heights, softness=0.5, axis=0)
positions[:,:,1] -= floor_height
""" Add Reference Joint """
trajectory_filterwidth = 3
reference = positions[:,0] * np.array([1,0,1])
reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')
positions = np.concatenate([reference[:,np.newaxis], positions], axis=1)
""" Get Foot Contacts """
velfactor, heightfactor = np.array([0.05,0.05]), np.array([3.0, 2.0])
feet_l_x = (positions[1:,fid_l,0] - positions[:-1,fid_l,0])**2
feet_l_y = (positions[1:,fid_l,1] - positions[:-1,fid_l,1])**2
feet_l_z = (positions[1:,fid_l,2] - positions[:-1,fid_l,2])**2
feet_l_h = positions[:-1,fid_l,1]
        feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(float)
feet_r_x = (positions[1:,fid_r,0] - positions[:-1,fid_r,0])**2
feet_r_y = (positions[1:,fid_r,1] - positions[:-1,fid_r,1])**2
feet_r_z = (positions[1:,fid_r,2] - positions[:-1,fid_r,2])**2
feet_r_h = positions[:-1,fid_r,1]
        feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(float)
""" Get Root Velocity """
velocity = (positions[1:,0:1] - positions[:-1,0:1]).copy()
""" Remove Translation """
positions[:,:,0] = positions[:,:,0] - positions[:,0:1,0]
positions[:,:,2] = positions[:,:,2] - positions[:,0:1,2]
""" Get Forward Direction """
#sdr_l, sdr_r, hip_l, hip_r = 19, 26, 3, 8
sdr_l, sdr_r, hip_l, hip_r = self.rifke_dict['sdr_l'], self.rifke_dict['sdr_r'], self.rifke_dict['hip_l'], self.rifke_dict['hip_r']
across1 = positions[:,hip_l] - positions[:,hip_r]
across0 = positions[:,sdr_l] - positions[:,sdr_r]
across = across0 + across1
across = across / np.sqrt((across**2).sum(axis=-1))[...,np.newaxis]
direction_filterwidth = 20
forward = np.cross(across, np.array([[0,1,0]]))
forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')
forward = forward / np.sqrt((forward**2).sum(axis=-1))[...,np.newaxis]
""" Remove Y Rotation """
target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
rotation = qbetween_np(forward, target)[:, np.newaxis]
positions = qrot_np(np.repeat(rotation, positions.shape[1], axis=1), positions)
""" Get Root Rotation """
velocity = qrot_np(rotation[1:], np.repeat(velocity, rotation.shape[1], axis=1))
rvelocity = self.get_rvelocity(rotation, forward='z', plane='xz')
""" Add Velocity, RVelocity, Foot Contacts to vector """
positions = positions[:-1]
positions = positions.reshape(len(positions), -1)
positions = np.concatenate([positions, velocity[:,:,0]], axis=-1)
positions = np.concatenate([positions, velocity[:,:,2]], axis=-1)
positions = np.concatenate([positions, rvelocity], axis=-1)
positions = np.concatenate([positions, feet_l, feet_r], axis=-1)
return positions
def get_rvelocity(self, rotation, forward='z', plane='xz'):
## TODO - might need a reversal of inputs for qmul_np
qs = qmul_np(rotation[1:], qinv_np(rotation[:-1]))
ds = np.zeros(qs.shape[:-1] + (3,))
ds[...,'xyz'.index(forward)] = 1.0
ds = qrot_np(qs, ds)
ys = ds[...,'xyz'.index(plane[0])]
xs = ds[...,'xyz'.index(plane[1])]
return np.arctan2(ys, xs)
def rifke2fke(self, positions, filename=None):
root_ry = torch.from_numpy(positions[..., -5]).unsqueeze(0).unsqueeze(0).float()
pos = positions[..., :-7].reshape(positions.shape[0], -1, 3)
pos[..., 0, [0,2]] = 0
''' Get Y Rotations '''
integrator = Integrator(1, root_ry.shape[-1])
root_ry = integrator(root_ry).squeeze(0).squeeze(0).numpy()
rotations = np.stack([np.cos(root_ry/2), np.zeros_like(root_ry),
np.sin(root_ry/2), np.zeros_like(root_ry)],
                             axis=-1).astype(float)
rotations = np.expand_dims(rotations, axis=1)
''' Rotate positions by adding Y rotations '''
pos = qrot_np(np.repeat(qinv_np(rotations), pos.shape[1], axis=1), pos)
''' Rotate XZ velocity vector '''
root_v = positions[..., -7:-5]
root_v = np.stack([root_v[..., 0], np.zeros_like(root_v[..., 0]), root_v[..., 1]], axis=-1)
try:
root_v = qrot_np(qinv_np(rotations.squeeze(1)), root_v)
except:
pdb.set_trace()
root_v = torch.from_numpy(root_v.transpose(1,0)).unsqueeze(0).float()
''' Get Root Positions from Root Velocities'''
integrator = Integrator(3, root_v.shape[-1])
root_t = integrator(root_v).squeeze(0).transpose(1, 0).numpy()
''' Add translations back to all the joints '''
pos[..., :, 0] += root_t[..., 0:1]
pos[..., :, 2] += root_t[..., 2:3]
return pos
class KITMocap(RawData):
def __init__(self, path2data, preProcess_flag=False):
super(KITMocap, self).__init__()
## load skeleton
self._SKELPATH = 'dataProcessing/KITMocap/skeleton.p'
self._MMMSKELPATH = 'skeleton/mmm.xml'
self._MMMSAMPLEPATH = 'dataProcessing/KITMocap/00001_mmm.xml'
os.makedirs(Path(self._SKELPATH).parent, exist_ok=True)
## get the skeleton and permutation
self.skel, self.permutation, self.new_joints = self.get_skeletonNpermutation()
## save skeleton
pkl.dump(self.skel, open(self._SKELPATH, 'wb'))
if preProcess_flag:
self.preProcess(path2data)
## Reading data
data = []
for tup in os.walk(path2data):
for filename in tup[2]:
if Path(filename).suffix == '.xml':
annotpath = Path(tup[0])/(filename.split('_')[0] + '_annotations.json')
annot = json.load(open(annotpath, 'r'))
quatpath = filename.split('_')[0] + '_quat.csv'
fkepath = filename.split('_')[0] + '_quat.fke'
rifkepath = filename.split('_')[0] + '_quat.rifke'
if annot:
for description in annot:
data.append([(Path(tup[0])/filename).as_posix(),
description,
(Path(tup[0])/quatpath).as_posix(),
(Path(tup[0])/fkepath).as_posix(),
(Path(tup[0])/rifkepath).as_posix()])
else:
data.append([(Path(tup[0])/filename).as_posix(),
'',
(Path(tup[0])/quatpath).as_posix(),
(Path(tup[0])/fkepath).as_posix(),
(Path(tup[0])/rifkepath).as_posix()])
        self.df = pd.DataFrame(data=data, columns=['euler', 'descriptions', 'quaternion', 'fke', 'rifke'])
import os
import pandas as pd
INDEXES = ["deaths", "confirmed", "hospitalized", "intensive care", "intubated", "released"]
def read_inputs():
text = input("Insert data to append as: [date: MM/DD/YY], [deaths], [confirmed], [hospitalized], [intensive care], "
"[intubated], [released]\n")
data_list = text.split(" ")
if len(data_list) != 7:
raise ValueError("Missing input values.")
new_data_series = pd.Series(index=INDEXES, data=data_list[1:])
new_data_series.rename(data_list[0], inplace=True)
new_data_series= new_data_series.astype('int')
return new_data_series
def append_data(new_data_series):
base_path = "../data"
filepath_overall = os.path.join(base_path, "time_series_19-covid-all.csv")
filepath_confirmed = os.path.join(base_path, "time_series_19-covid-Confirmed.csv")
filepath_deaths = os.path.join(base_path, "time_series_19-covid-Deaths.csv")
filepath_hospitalized = os.path.join(base_path, "time_series_19-covid-Hospitalized.csv")
filepath_icu = os.path.join(base_path, "time_series_19-covid-ICU.csv")
filepath_intubated = os.path.join(base_path, "time_series_19-covid-Intubated.csv")
filepath_released = os.path.join(base_path, "time_series_19-covid-Released.csv")
    df_overall = pd.read_csv(filepath_overall)
#!/usr/bin/env python
# coding: utf-8
"""
make_herbarium_2022_catalog_df.py
"""
#
# Description:
#
# Created On: Sunday Feb 27th, 2022
# Created By: <NAME>
# ### Key constants
# DATASETS_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images"
# EXTANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Extant_Leaves/original/full/jpg"
# GENERAL_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/General_Fossil/original/full/jpg"
# FLORISSANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/Florissant_Fossil/original/full/jpg"
# with open(os.path.join(HERBARIUM_ROOT, "train_metadata.json")) as fp:
# train_data = json.load(fp)
# with open(os.path.join(HERBARIUM_ROOT, "test_metadata.json")) as fp:
# test_data = json.load(fp)
# for k,v in train_data.items():
# print(k, f"| Total:{len(v)}")
# print("First:", v[0])
# print("Last:", v[-1])
# print("="*15+"\n")
# assert len(train_data["annotations"]) == len(train_data["images"])
import argparse
import os
import sys
from typing import *
import json
import pandas as pd
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
from rich import print as pp
# HERBARIUM_ROOT_DEFAULT = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize"
# from dotenv import load_dotenv
# load_dotenv()
import imutils
from imutils.big.split_catalog_utils import TRAIN_KEY, VAL_KEY, TEST_KEY
HERBARIUM_ROOT_DEFAULT = os.environ["HERBARIUM_ROOT_DEFAULT"]
CATALOG_DIR = os.environ["CATALOG_DIR"]
SPLITS_DIR = os.environ["SPLITS_DIR"]
def optimize_dtypes_train(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium train metadata df.
"""
# Reduce total df size by optimizing dtypes per column
cat_cols = ['genus_id', 'institution_id', 'category_id',
'scientificName', 'family', 'genus', 'species','Species',
'collectionCode', 'license', 'authors']
if "y" in df.columns:
cat_cols.append("y")
str_cols = ['image_id', 'file_name', 'path']
col_dtypes = {c:"category" for c in cat_cols if c in df.columns}
col_dtypes.update({c:"string" for c in str_cols})
# df = df.convert_dtypes()
df = df.astype(col_dtypes)
return df
def optimize_dtypes_test(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium test metadata df.
"""
dtypes_test = {'image_id':"string",
'file_name':"string",
'license':"category",
'path':"string"}
dtypes_test= {col:dtype for col, dtype in dtypes_test.items() if col in df.columns}
# Reduce total df size by optimizing dtypes per column
df = df.astype(dtypes_test)
return df
def read_train_df_from_csv(train_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(train_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_train(df)
return df
def read_test_df_from_csv(test_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(test_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_test(df)
return df
def read_all_from_csv(root_dir: str=None,
source_csv_paths: Optional[List[str]]=None,
subset_read_funcs: Union[Callable, Dict[str, Callable]]={
TRAIN_KEY: read_train_df_from_csv,
TEST_KEY: read_test_df_from_csv
},
return_dict: bool=False,
**kwargs) -> Tuple[pd.DataFrame]:
"""
Read the train_metadata.csv and test_metadata.csv files from `root_dir`
Note: This is prior to any train-val splits.
"""
if source_csv_paths is not None:
train_path, test_path = sorted(source_csv_paths)[::-1]
else:
train_path = Path(root_dir, "train_metadata.csv")
test_path = Path(root_dir, "test_metadata.csv")
if isinstance(subset_read_funcs, Callable):
train_df = subset_read_funcs(train_path)
test_df = subset_read_funcs(test_path)
else:
train_df = subset_read_funcs[TRAIN_KEY](train_path)
test_df = subset_read_funcs[TEST_KEY](test_path)
# train_df = read_train_df_from_csv(train_path)
# test_df = read_test_df_from_csv(test_path)
if return_dict:
return {
TRAIN_KEY: train_df,
TEST_KEY: test_df
}
return train_df, test_df
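# Usage sketch: read both source CSVs (before any train/val split) from the
# Herbarium root taken from the environment variables above.
def _demo_read_all():
    train_df, test_df = read_all_from_csv(root_dir=HERBARIUM_ROOT_DEFAULT)
    return train_df.shape, test_df.shape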
# read_train_df_from_csv,
# read_test_df_from_csv
###################################
###################################
class HerbariumMetadata:
TRAIN_KEYS = ['annotations', 'images', 'categories', 'genera', 'institutions', 'distances', 'license']
TEST_KEYS = ['image_id', 'file_name', 'license']
def __init__(self,
herbarium_root: str=HERBARIUM_ROOT_DEFAULT):
self.herbarium_root = herbarium_root
def get_train_df(self) -> pd.DataFrame:
metadata_path = Path(self.herbarium_root, "train_metadata.json")
with open(os.path.join(metadata_path)) as fp:
train_data = json.load(fp)
assert all([k in train_data.keys() for k in self.TRAIN_KEYS])
train_annotations = pd.DataFrame(train_data['annotations'])
train_categories = pd.DataFrame(train_data['categories']).set_index("category_id")
train_genera = pd.DataFrame(train_data['genera']).set_index("genus_id")
        train_institutions = pd.DataFrame(train_data['institutions'])
#!/usr/bin/env python3
import glob
import math
import sqlite3
import sys
from itertools import product
import logzero
import pandas as pd
from logzero import logger
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace.varmax import VARMAX
sys.path.append("../../sql")
import queries
pd.set_option("plotting.backend", "plotly")
def get_db_connection(db_path, db_filename):
conn = sqlite3.connect(db_path + db_filename)
logger.info(f"Connection made: {conn}")
return conn
def get_db_zipcodes(conn):
cursor = conn.cursor()
cursor.execute(queries.select_distinct_zips)
zipcodes = cursor.fetchall()
zipcodes = [z[0] for z in zipcodes]
logger.info(f"Distinct zip codes: {zipcodes}")
return zipcodes
def get_column_names(conn, table_name):
cursor = conn.cursor()
cursor.execute(queries.select_column_names, {"table_name": table_name})
names = cursor.fetchall()
names = [name[0] for name in names]
logger.info(f"Column Names: {names}")
return names
def get_locale_data(conn, zipcode):
cursor = conn.cursor()
cursor.execute(queries.select_locale_data, {"zipcode": zipcode})
query_data = cursor.fetchone()
locale_data = [qd for qd in query_data]
logger.info(f"Locale data: {locale_data}")
return locale_data
def get_db_files(db_path="./"):
db_files = [
file.split("/")[-1]
for file in glob.glob(db_path + "*.db")
if file.split("/")[-1] != "geo_zipcodes.db"
]
return tuple(sorted(db_files))
def get_irr_data(conn, zipcode):
params = {"zipcode": zipcode}
df = pd.read_sql(
queries.select_nsr_rows,
conn,
params=params,
index_col="date_time",
parse_dates=["date_time"],
)
df.sort_index(axis=0, inplace=True)
return df
def get_plots_layout(num_columns=1, num_items=1):
# row, column dimension calculation
return {"rows": (math.ceil(num_items / num_columns)), "columns": num_columns}
def get_data_decomps(df, period=12):
decomps = {}
cols = df.columns.tolist()
for col in cols:
decomps.update({col: seasonal_decompose(df[col], model="additive", period=period)})
return decomps
def get_train_test(df, test_len_yrs=1):
# columns = df.columns.tolist()
# forecast_on_idx = 1
# sarima_series = df[columns[forecast_on_idx]]
months = 12
total_len = len(df)
test_len = months * test_len_yrs
train_len = total_len - test_len
train = df.iloc[:train_len] # [columns[forecast_on_idx]]
test = df.iloc[train_len:] # [columns[forecast_on_idx]]
    return train, test
def get_forecast():
forecast = ts_tools.sarima_model(
sarima_train,
*best_order,
s=12,
num_fc=119,
forecast=True,
)
return forecast
#-----------------------------------------------------------------------------#
def gen_arima_params(p_rng=(0, 0), d_rng=(0, 0), q_rng=(0, 0), debug=False):
"""
input: 3 2-tuples of inclusive value ranges
Boolean for debug printing
functionality: produce a cartesian product of the inputs
output: list of 3-tuple products
"""
p = range(p_rng[0], p_rng[1] + 1)
d = range(d_rng[0], d_rng[1] + 1)
q = range(q_rng[0], q_rng[1] + 1)
# Create a list with all possible combination of parameters
parameters = product(p, d, q)
parameters_list = list(parameters)
order_list = []
for params in parameters_list:
params = list(params)
params = tuple(params)
order_list.append(params)
if debug:
print(f"ARIMA Order list length: {len(order_list)}")
print(f"ARIMA Order list\n {order_list[:3]}")
return order_list
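# Sketch: enumerate all 18 candidate (p, d, q) orders with p, q in 0..2 and
# d in 0..1, for use with ARIMA_optimizer below.
def _demo_arima_grid():
    return gen_arima_params(p_rng=(0, 2), d_rng=(0, 1), q_rng=(0, 2), debug=True)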
def ARIMA_optimizer(series, arima_order, debug=False):
"""
input: Pandas data series with pd.datetime index
list of 3-tuples representing ARIMA orders (p, d, q)
Boolean for debug printing
functionality: model each (p, d, q) sequence, store AIC
return: pandas dataframe listing all (p, d, q) AIC pairs,
ordered best to worst
"""
results = []
for order in arima_order:
try:
model = ARIMA(
series,
order=order,
).fit()
except:
if debug:
print("exception occured")
continue
aic = model.aic
results.append([order, model.aic])
result_df = pd.DataFrame(results)
result_df.columns = ["(p, d, q)", "AIC"]
result_df = result_df.sort_values(by="AIC", ascending=True).reset_index(drop=True)
return result_df
def arima_model(series, p=0, d=0, q=0, num_fc=1, summary=False, forecast=False):
"""
input: Pandas Series with pd.datetime index.
Integers: ARIMA parameters p, d, q
Integer: number of forecast periods
Boolean: summary print
Bookean: True return forecast, False return model
functionality: perform an ARIMA forecast of the Series time-series
return: forecast data or model
"""
model = ARIMA(
series,
order=(p, d, p),
enforce_stationarity=True,
# trend="n",
).fit()
if summary:
print(model.summary())
if forecast:
start = len(series)
end = start + num_fc
forecast = model.predict(start=start, end=end)
return forecast
return model
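# Sketch: pick the best (p, d, q) on a synthetic monthly series and forecast
# twelve further months. The series values and the search grid are illustrative.
def _demo_arima_fit_and_forecast():
    import numpy as np
    idx = pd.date_range("2015-01-01", periods=60, freq="MS")
    series = pd.Series(np.sin(np.arange(60) / 6.0) + np.random.normal(0, 0.1, 60), index=idx)
    orders = gen_arima_params(p_rng=(0, 2), d_rng=(0, 1), q_rng=(0, 2))
    best = ARIMA_optimizer(series, orders).iloc[0]["(p, d, q)"]
    return arima_model(series, *best, num_fc=12, forecast=True)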
def gen_sarima_params(
p_rng=(0, 0),
d_rng=(0, 0),
q_rng=(0, 0),
P_rng=(0, 0),
D_rng=(0, 0),
Q_rng=(0, 0),
debug=False,
):
"""
input: 3 3-tuples of inclusive value ranges
Boolean for debug printing
functionality: produce a cartesian product of the inputs
output: list of 3-tuple products
"""
p = range(p_rng[0], p_rng[1] + 1)
d = range(d_rng[0], d_rng[1] + 1)
q = range(q_rng[0], q_rng[1] + 1)
P = range(P_rng[0], P_rng[1] + 1)
D = range(D_rng[0], D_rng[1] + 1)
Q = range(Q_rng[0], Q_rng[1] + 1)
# Create a list with all possible combination of parameters
parameters = product(p, d, q, P, D, Q)
parameters_list = list(parameters)
order_list = []
for params in parameters_list:
params = list(params)
params = tuple(params)
order_list.append(params)
if debug:
print(f"SARIMA Order list length: {len(order_list)}")
print(f"SARIMA Order list\n {order_list[:3]}")
return order_list
def SARIMA_optimizer(series, sarima_order, s=0, debug=False):
"""
input: Pandas data series with pd.datetime index
list of 6-tuples representing ARIMA orders (p, d, q, P, D, Q)
Boolean for debug printing
functionality: model each (p, d, q) sequence, store AIC
return: pandas dataframe listing all (p, d, q, P, D, Q) AIC pairs,
ordered best to worst
"""
results = []
for order in sarima_order:
if debug:
print(order)
try:
model = ARIMA(
series,
order=(order[0], order[1], order[2]),
seasonal_order=(order[3], order[4], order[5], s),
enforce_stationarity=True,
# trend="c",
).fit()
except:
if debug:
print("exception occured")
continue
aic = model.aic
results.append([order, model.aic])
    result_df = pd.DataFrame(results)
    result_df.columns = ["(p, d, q, P, D, Q)", "AIC"]
    result_df = result_df.sort_values(by="AIC", ascending=True).reset_index(drop=True)
    return result_df
# https://www.kaggle.com/shivank856/gtsrb-cnn-98-test-accuracy
import PIL
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from PIL import Image
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import accuracy_score
np.random.seed(42)
from matplotlib import style
style.use('fivethirtyeight')
data_dir = '../Kagle_trye/archive'
train_path = '../Kagle_trye/archive/Train'
test_path = '../Kagle_trye/archive'
# Resizing the images to 30x30x3
IMG_HEIGHT = 30
IMG_WIDTH = 30
channels = 3
NUM_CATEGORIES = len(os.listdir(train_path))
print(NUM_CATEGORIES)
classes = { 0:'Speed limit (20km/h)',
1:'Speed limit (30km/h)',
2:'Speed limit (50km/h)',
3:'Speed limit (60km/h)',
4:'Speed limit (70km/h)',
5:'Speed limit (80km/h)',
6:'End of speed limit (80km/h)',
7:'Speed limit (100km/h)',
8:'Speed limit (120km/h)',
9:'No passing',
10:'No passing veh over 3.5 tons',
11:'Right-of-way at intersection',
12:'Priority road',
13:'Yield',
14:'Stop',
15:'No vehicles',
16:'Veh > 3.5 tons prohibited',
17:'No entry',
18:'General caution',
19:'Dangerous curve left',
20:'Dangerous curve right',
21:'Double curve',
22:'Bumpy road',
23:'Slippery road',
24:'Road narrows on the right',
25:'Road work',
26:'Traffic signals',
27:'Pedestrians',
28:'Children crossing',
29:'Bicycles crossing',
30:'Beware of ice/snow',
31:'Wild animals crossing',
32:'End speed + passing limits',
33:'Turn right ahead',
34:'Turn left ahead',
35:'Ahead only',
36:'Go straight or right',
37:'Go straight or left',
38:'Keep right',
39:'Keep left',
40:'Roundabout mandatory',
41:'End of no passing',
42:'End no passing veh > 3.5 tons' }
folders = os.listdir(train_path)
train_number = []
class_num = []
for folder in folders:
train_files = os.listdir(train_path + '/' + folder)
train_number.append(len(train_files))
class_num.append(classes[int(folder)])
# Sorting the dataset on the basis of number of images in each class
zipped_lists = zip(train_number, class_num)
sorted_pairs = sorted(zipped_lists)
tuples = zip(*sorted_pairs)
train_number, class_num = [list(tuple) for tuple in tuples]
# Plotting the number of images in each class
plt.figure(figsize=(21, 10))
plt.bar(class_num, train_number)
plt.xticks(class_num, rotation='vertical')
plt.show()
import random
from matplotlib.image import imread
test = pd.read_csv(data_dir + '/Test.csv')
imgs = test["Path"].values
plt.figure(figsize=(25,25))
for i in range(1,26):
plt.subplot(5,5,i)
random_img_path = data_dir + '/' + random.choice(imgs)
rand_img = imread(random_img_path)
plt.imshow(rand_img)
plt.grid(b=None)
plt.xlabel(rand_img.shape[1], fontsize = 20)#width of image
plt.ylabel(rand_img.shape[0], fontsize = 20)#height of image
plt.show()
image_data = []
image_labels = []
for i in range(NUM_CATEGORIES):
path = data_dir + '/Train/' + str(i)
images = os.listdir(path)
for img in images:
try:
# image = cv2.imread(path + '/' + img)
# image_fromarray = Image.fromarray(image, 'RGB')
# resize_image = image_fromarray.resize((IMG_HEIGHT, IMG_WIDTH))
img = PIL.Image.open(path + '/' + img)
resize_image = img.resize((IMG_HEIGHT, IMG_WIDTH))
image_data.append(np.array(resize_image))
image_labels.append(i)
except:
print("Error in " + img)
# Changing the list to numpy array
image_data = np.array(image_data)
image_labels = np.array(image_labels)
shuffle_indexes = np.arange(image_data.shape[0])
np.random.shuffle(shuffle_indexes)
image_data = image_data[shuffle_indexes]
image_labels = image_labels[shuffle_indexes]
X_train, X_val, y_train, y_val = train_test_split(image_data, image_labels, test_size=0.3, random_state=42, shuffle=True)
X_train = X_train/255
X_val = X_val/255
print("X_train.shape", X_train.shape)
print("X_valid.shape", X_val.shape)
print("y_train.shape", y_train.shape)
print("y_valid.shape", y_val.shape)
print(image_data.shape, image_labels.shape)
y_train = keras.utils.to_categorical(y_train, NUM_CATEGORIES)
y_val = keras.utils.to_categorical(y_val, NUM_CATEGORIES)
print(y_train.shape)
print(y_val.shape)
model = keras.models.Sequential([
keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH, channels)),
keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPool2D(pool_size=(2, 2)),
keras.layers.BatchNormalization(axis=-1),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPool2D(pool_size=(2, 2)),
keras.layers.BatchNormalization(axis=-1),
keras.layers.Flatten(),
keras.layers.Dense(512, activation='relu'),
keras.layers.BatchNormalization(),
keras.layers.Dropout(rate=0.5),
keras.layers.Dense(43, activation='softmax')
])
aug = ImageDataGenerator(
rotation_range=10,
zoom_range=0.15,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.15,
horizontal_flip=False,
vertical_flip=False,
fill_mode="nearest")
# Compile with the imported Adam optimizer before fitting; the epoch count and
# optimizer settings here are illustrative defaults.
epochs = 30
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_val, y_val))
model.save("KagleModel300EPIL.h5")
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
test = pd.read_csv(data_dir + '/Test.csv')
import numpy as np
import pandas as pd
from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.models.interaction import interaction_filter
def call(meta: pd.DataFrame,
counts: pd.DataFrame,
interactions: pd.DataFrame,
iterations: int = 1000,
threshold: float = 0.1,
threads: int = 4,
debug_seed: int = -1,
result_precision: int = 3
) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
core_logger.info(
'[Cluster Statistical Analysis Simple] '
'Threshold:{} Iterations:{} Debug-seed:{} Threads:{} Precision:{}'.format(threshold,
iterations,
debug_seed,
threads,
result_precision))
if debug_seed >= 0:
        np.random.seed(debug_seed)
        core_logger.warning('Debug random seed enabled. Set to {}'.format(debug_seed))
interactions_filtered, counts_filtered = prefilters(counts, interactions)
if interactions_filtered.empty or counts_filtered.empty:
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
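# Quick illustration of the two distances on made-up 4-CpG patterns
# (for orientation only, not part of the original pipeline):
# Ham_d(np.array([1, 0, 1, 1]), np.array([1, 1, 1, 0]))  # -> 2 mismatching positions
# WDK_d(np.array([1, 0, 1, 1]), np.array([1, 1, 1, 0]))  # weighted by shared sub-patterns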
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            phylotree=np.concatenate(([0],np.repeat(3,16),np.repeat(1.5,6),[3.2,0.8],np.repeat(2,3),[1.5,1.5]))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
    print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CG'
#samfile.close()
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
                        df2 = pd.DataFrame(data=dr)
import os
import numpy as np
import json
import requests
try:
import modin.pandas as pd
except ImportError:
import pandas as pd
import galaxy_utilities as gu
from tqdm import tqdm
import make_cutouts as mkct
from astropy.wcs import WCS, FITSFixedWarning
import warnings
warnings.simplefilter('ignore', FITSFixedWarning)
# we'll be dividing by NaN a lot (regions of zero pixel coverage)
warnings.simplefilter('ignore', RuntimeWarning)
loc = os.path.abspath(os.path.dirname(__file__))
sid_list = np.loadtxt(os.path.join(loc, 'subject-id-list.csv'), dtype='u8')
def get_data(subject_id):
diff_data = gu.get_diff_data(subject_id)
# generate a cutout and sigma image
frame_data = mkct.get_frame_data(subject_id)
frame_data.to_pickle('frame_data/{}.pickle'.format(subject_id))
try:
stacked_image, sigma_image = mkct.generate_new_cutout(
subject_id, frame_data=frame_data
)
except ValueError as e:
print('Error on:', subject_id, e)
print('Frame cutouts were not the same shape')
return None
# scale the image and std to match that used in modelling
im_scaled = stacked_image / diff_data['multiplier']
sd_scaled = sigma_image / diff_data['multiplier']
# we may need to correct for rotation for some subjects
r = requests.get(json.loads(gu.subjects.loc[subject_id].locations)['0'])
rotation_correction = 0
if r.ok:
subject_data = json.loads(r.text)
zoo_mask = np.array(subject_data['mask'])
zoo_gal = np.ma.masked_array(subject_data['imageData'], zoo_mask)
montaged_cutout = mkct.get_montaged_cutout(subject_id).data
montaged_mask = gu.get_diff_data(subject_id)['mask']
montaged_gal = np.ma.masked_array(montaged_cutout, montaged_mask)
loss = np.inf
for k in (0, 3):
d = montaged_gal / montaged_gal.max() - np.rot90(zoo_gal, k=k)
m = np.logical_xor(montaged_mask, np.rot90(zoo_gal.mask, k=k))
loss_ = np.nansum(np.abs(d)) / d.size + np.sum(m)
if loss_ < loss:
rotation_correction = 2 * np.pi * k / 4
loss = loss_
else:
# assume rotation is zero on failure
rotation_correction = 0
# get the WCS objects so we can transform models back into the original
# projection
montage_wcs = mkct.get_montaged_cutout(subject_id).wcs
original_wcs = frame_data.iloc[0].wcs
return dict(
psf=gu.get_psf(subject_id),
pixel_mask=~im_scaled.mask,
galaxy_data=im_scaled,
montage_wcs=montage_wcs,
original_wcs=original_wcs,
multiplier=diff_data['multiplier'],
sigma_image=sd_scaled,
width=diff_data['width'],
size_diff=diff_data['width'] / diff_data['imageWidth'],
rotation_correction=rotation_correction,
)
tqdm.pandas(desc='Making DataFrame')
df = pd.Series(sid_list, index=sid_list)
import numpy as np
import pandas as pd
import numba
from vtools.functions.filter import cosine_lanczos
def get_smoothed_resampled(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip'):
"""Resample the dataframe (indexed by time) to the regular period of resample_period using the interpolate method
Furthermore the cosine lanczos filter is used with a cutoff_period to smooth the signal to remove high frequency noise
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
Returns:
DataFrame: smoothed and resampled dataframe indexed by datetime
"""
dfb = df.resample(resample_period).fillna(method='backfill')
df = df.resample(resample_period).interpolate(method=interpolate_method)
df[dfb.iloc[:, 0].isna()] = np.nan
return cosine_lanczos(df, cutoff_period)
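# Minimal usage sketch (assumes `df_stage` is a single-column water-level
# DataFrame with a DatetimeIndex; the name is illustrative only):
# df_smooth = get_smoothed_resampled(df_stage, cutoff_period='2H', resample_period='1T')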
@numba.jit(nopython=True)
def lmax(arr):
'''Local maximum: Returns value only when centered on maximum
'''
idx = np.argmax(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
@numba.jit(nopython=True)
def lmin(arr):
'''Local minimum: Returns value only when centered on minimum
'''
idx = np.argmin(arr)
if idx == len(arr)/2:
return arr[idx]
else:
return np.NaN
def periods_per_window(moving_window_size: str, period_str: str) -> int:
"""Number of period size in moving window
Args:
moving_window_size (str): moving window size as a string e.g 7H for 7 hour
period_str (str): period as str e.g. 1T for 1 min
Returns:
int: number of periods in the moving window rounded to an integer
"""
return int(pd.Timedelta(moving_window_size)/pd.to_timedelta(pd.tseries.frequencies.to_offset(period_str)))
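# e.g. periods_per_window('7H', '1T') -> 420 one-minute periods in a 7-hour window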
def tidal_highs(df, moving_window_size='7H'):
"""Tidal highs (could be upto two highs in a 25 hr period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for highs within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with highs at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmax = df.rolling(moving_window_size, min_periods=periods).apply(lmax, raw=True)
dfmax = dfmax.shift(periods=-(periods//2-1))
dfmax = dfmax.dropna()
dfmax.columns = ['max']
return dfmax
def tidal_lows(df, moving_window_size='7H'):
"""Tidal lows (could be upto two lows in a 25 hr period)
Args:
df (DataFrame): a time series with a regular frequency
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
DataFrame: an irregular time series with lows at resolution of df.index
"""
period_str = df.index.freqstr
periods = periods_per_window(moving_window_size, period_str)
dfmin = df.rolling(moving_window_size, min_periods=periods).apply(lmin, raw=True)
dfmin = dfmin.shift(periods=-(periods//2-1))
dfmin = dfmin.dropna()
dfmin.columns = ['min']
return dfmin
def get_tidal_hl(df, cutoff_period='2H', resample_period='1T', interpolate_method='pchip', moving_window_size='7H'):
"""Get Tidal highs and lows
Args:
df (DataFrame): A single column dataframe indexed by datetime
cutoff_period (str, optional): cutoff period for cosine lanczos filter. Defaults to '2H'.
resample_period (str, optional): Resample to regular period. Defaults to '1T'.
interpolate_method (str, optional): interpolation for resampling. Defaults to 'pchip'.
moving_window_size (str, optional): moving window size to look for lows within. Defaults to '7H'.
Returns:
tuple of DataFrame: Tidal high and tidal low time series
"""
dfs = get_smoothed_resampled(df, cutoff_period, resample_period, interpolate_method)
return tidal_highs(dfs), tidal_lows(dfs)
get_tidal_hl_rolling = get_tidal_hl # for older refs. #FIXME
def get_tidal_amplitude(dfh, dfl):
"""Tidal amplitude given tidal highs and lows
Args:
dfh (DataFrame): Tidal highs time series
dfl (DataFrame): Tidal lows time series
Returns:
DataFrame: Amplitude timeseries, at the times of the low following the high being used for amplitude calculation
"""
dfamp = pd.concat([dfh, dfl], axis=1)
dfamp = dfamp[['min']].dropna().join(dfamp[['max']].ffill())
return pd.DataFrame(dfamp['max']-dfamp['min'], columns=['amplitude'])
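# Usage sketch chaining the helpers above (illustrative names, no real data):
# dfh, dfl = get_tidal_hl(df_stage)
# dfamp = get_tidal_amplitude(dfh, dfl)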
def get_value_diff(df, percent_diff=False):
'''
Get the difference of values of each element in the dataframe
The times in the dataframe may or may not coincide as this is a slice of irregularly sampled time series
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
if percent_diff:
value_diff = 100.0 * (arr[0].values[0]-arr[1].values[0])/arr[1].values[0]
else:
value_diff = arr[0].values[0]-arr[1].values[0]
return value_diff
except:
return np.nan
def get_tidal_amplitude_diff(dfamp1, dfamp2, percent_diff=False):
"""Get the difference of values within +/- 4H of values in the two amplitude arrays
Args:
dfamp1 (DataFrame): Amplitude time series
dfamp2 (DataFrame): Amplitude time series
percent_diff (bool, optional): If true do percent diff. Defaults to False.
Returns:
DataFrame: Difference dfamp1-dfamp2 or % Difference (dfamp1-dfamp2)/dfamp2*100 for values within +/- 4H of each other
"""
dfamp = pd.concat([dfamp1, dfamp2], axis=1).dropna(how='all')
dfamp.columns = ['2', '1']
tdelta = '4H'
sliceamp = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfamp.index]
ampdiff = [get_value_diff(dfamp[sl], percent_diff) for sl in sliceamp]
return pd.DataFrame(ampdiff, index=dfamp.index)
def get_index_diff(df):
'''
Get the difference of index values of each element in the dataframe
The times in the dataframe may or may not coincide
The difference is in Timedelta and is converted to minutes
On any error, the returned value is np.nan
'''
try:
arr = [df[c].dropna() for c in df.columns]
tidal_phase_diff = (arr[0].index[0]-arr[1].index[0]).total_seconds()/60.
return tidal_phase_diff
except:
return np.nan
def get_tidal_phase_diff(dfh2, dfl2, dfh1, dfl1):
"""Calculates the phase difference between df2 and df1 tidal highs and lows
    Scans +/- 4 hours in df1 to get the highs and lows in that window for df2 to
get the tidal highs and lows at the times of df1
Args:
dfh2 (DataFrame): Timeseries of tidal highs
dfl2 (DataFrame): Timeseries of tidal lows
dfh1 (DataFrame): Timeseries of tidal highs
        dfl1 (DataFrame): Timeseries of tidal lows
Returns:
DataFrame: Phase difference (dfh2-dfh1) and (dfl2-dfl1) in minutes
"""
tdelta = '4H'
sliceh1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfh1.index]
    slicel1 = [slice(t-pd.to_timedelta(tdelta), t+pd.to_timedelta(tdelta)) for t in dfl1.index]
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from eva.expression.abstract_expression import AbstractExpression, \
ExpressionType, \
ExpressionReturnType
from eva.models.storage.batch import Batch
class ComparisonExpression(AbstractExpression):
def __init__(self, exp_type: ExpressionType, left: AbstractExpression,
right: AbstractExpression):
children = []
if left is not None:
children.append(left)
if right is not None:
children.append(right)
super().__init__(exp_type, rtype=ExpressionReturnType.BOOLEAN,
children=children)
def evaluate(self, *args, **kwargs):
# cast in to numpy array
lvalues = self.get_child(0).evaluate(*args, **kwargs).frames.values
rvalues = self.get_child(1).evaluate(*args, **kwargs).frames.values
if len(lvalues) != len(rvalues):
if len(lvalues) == 1:
lvalues = np.repeat(lvalues, len(rvalues), axis=0)
elif len(rvalues) == 1:
rvalues = np.repeat(rvalues, len(lvalues), axis=0)
else:
raise Exception(
"Left and Right batch does not have equal elements")
if self.etype == ExpressionType.COMPARE_EQUAL:
return Batch(pd.DataFrame(lvalues == rvalues))
elif self.etype == ExpressionType.COMPARE_GREATER:
return Batch(pd.DataFrame(lvalues > rvalues))
elif self.etype == ExpressionType.COMPARE_LESSER:
return Batch(pd.DataFrame(lvalues < rvalues))
elif self.etype == ExpressionType.COMPARE_GEQ:
return Batch(pd.DataFrame(lvalues >= rvalues))
elif self.etype == ExpressionType.COMPARE_LEQ:
return Batch(pd.DataFrame(lvalues <= rvalues))
elif self.etype == ExpressionType.COMPARE_NEQ:
            return Batch(pd.DataFrame(lvalues != rvalues))
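        # The broadcasting above, illustrated with plain numpy (standalone sketch,
        # not part of the EVA API):
        #   lvalues = np.asarray([[1], [2], [3]]); rvalues = np.asarray([[2]])
        #   rvalues is repeated to 3 rows, so COMPARE_EQUAL yields
        #   [[False], [True], [False]], which is then wrapped into a Batch.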
"""
Description : This file implements the Drain algorithm for log parsing
Author : LogPAI team
License : MIT
"""
import hashlib
import os
import re
import pandas as pd
from datetime import datetime
from typing import List
from .log_signature import calc_signature
# A leaf node corresponds to one LogCluster
class LogCluster:
def __init__(self, template_token_list: List[str], log_id_list: List[int]):
self.template_token_list = template_token_list
self.log_id_list = log_id_list
self.template_id = None
# Tree node
class Node:
def __init__(self, childD=None, depth=0, digitOrToken=None):
self.childD = {} if childD is None else childD
self.depth = depth
self.digitOrToken = digitOrToken
self.template_count = 0
def get_template(seq1, seq2):
assert len(seq1) == len(seq2)
res = []
for t1, t2 in zip(seq1, seq2):
if t1 == t2:
res.append(t1)
else:
res.append('<*>')
return res
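# e.g. get_template(['Send', '5', 'bytes'], ['Send', '7', 'bytes'])
#      -> ['Send', '<*>', 'bytes']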
class LogParser:
def __init__(self, log_format, indir='./', outdir='./result/', depth=4, st=0.4,
maxChild=100, rex=None, keep_para=True):
"""
Attributes
----------
rex : regular expressions used in preprocessing (step1)
path : the input path stores the input log file name
depth : depth of all leaf nodes
st : similarity threshold
maxChild : max number of children of an internal node
log_name : the name of the input file containing raw log messages
save_path : the output path stores the file containing structured logs
"""
self.path = indir
self.depth = depth - 2
self.st = st
self.maxChild = maxChild
self.log_name = None
self.save_path = outdir
self.df_log = None
self.log_format = log_format
self.rex = [] if rex is None else rex
self.keep_para = keep_para
def tree_search(self, root, token_list):
seq_len = len(token_list)
# ้ฟๅบฆๅฑ๏ผๅคๆญ้ฟๅบฆ
if seq_len not in root.childD:
return 0, None
len_node = root.childD[seq_len] # ้ฟๅบฆๅฑ็่็น
depth = 1
for token in token_list:
if depth >= self.depth or depth > seq_len:
break
if token in len_node.childD:
len_node = len_node.childD[token]
elif '<*>' in len_node.childD:
len_node = len_node.childD['<*>']
else:
return 0, None
depth += 1
return self.fastMatch(len_node.childD, token_list)
def addSeqToPrefixTree(self, rn, logClust):
def has_number(s):
return any(char.isdigit() for char in s)
        logClust.template_id = rn.template_count  # template id is the running counter
        rn.template_count += 1  # one more template under this root
seqLen = len(logClust.template_token_list)
if seqLen not in rn.childD:
firtLayerNode = Node(depth=1, digitOrToken=seqLen)
rn.childD[seqLen] = firtLayerNode
else:
firtLayerNode = rn.childD[seqLen]
parentn = firtLayerNode
currentDepth = 1
        # note: when there is only one token, the result is incorrect
for token in logClust.template_token_list:
# Add current log cluster to the leaf node
if currentDepth >= self.depth or currentDepth > seqLen:
# if len(parentn.childD) == 0:
# parentn.childD = [logClust]
# else:
# parentn.childD.append(logClust)
break
# If token not matched in this layer of existing tree.
if token not in parentn.childD:
if not has_number(token):
if '<*>' in parentn.childD:
if len(parentn.childD) < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken=token)
parentn.childD[token] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if len(parentn.childD) + 1 < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken=token)
parentn.childD[token] = newNode
parentn = newNode
elif len(parentn.childD) + 1 == self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrToken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if '<*>' not in parentn.childD:
newNode = Node(depth=currentDepth + 1, digitOrToken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
# If the token is matched
else:
parentn = parentn.childD[token]
currentDepth += 1
        # add the cluster to the logClusterList of the leaf
if len(parentn.childD) == 0:
parentn.childD = [logClust]
else:
parentn.childD.append(logClust)
# seq1 is template
def seqDist(self, seq1, seq2):
assert len(seq1) == len(seq2)
simTokens = 0
numOfPar = 0
for token1, token2 in zip(seq1, seq2):
if token1 == '<*>':
numOfPar += 1
continue
if token1 == token2:
simTokens += 1
retVal = float(simTokens) / len(seq1)
return retVal, numOfPar
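        # e.g. with template ['Send', '<*>', 'bytes'] and log ['Send', '7', 'bytes'],
        # seqDist returns (2/3, 1): two exact token matches out of three, one wildcard.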
def fastMatch(self, logClustL, seq):
retLogClust = None
maxSim = -1
maxNumOfPara = -1
maxClust = None
        maxIdx = -1  # index of the matched cluster
for i, logClust in enumerate(logClustL):
curSim, curNumOfPara = self.seqDist(logClust.template_token_list, seq)
if curSim > maxSim or (curSim == maxSim and curNumOfPara > maxNumOfPara):
maxSim = curSim
maxNumOfPara = curNumOfPara
maxClust = logClust
maxIdx = i
if maxSim < self.st:
return len(logClustL), None
else:
return maxIdx, maxClust
# if maxSim >= self.st:
# retLogClust = maxClust
# return retLogClust
    # output results in a custom format
def outputEventId(self, event_id_list):
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.df_log['EventId'] = event_id_list
self.df_log.to_csv(os.path.join(self.save_path, self.log_name + '_structured.csv'), index=False)
def outputResult(self, logClustL):
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
log_templates = [0] * self.df_log.shape[0]
log_templateids = [0] * self.df_log.shape[0]
df_events = []
for logClust in logClustL:
template_str = ' '.join(logClust.template_token_list)
occurrence = len(logClust.log_id_list)
# template_id = hashlib.md5(template_str.encode('utf-8')).hexdigest()[0:8]
template_id = logClust.template_id
for log_id in logClust.log_id_list:
log_id -= 1
log_templates[log_id] = template_str
log_templateids[log_id] = template_id
df_events.append([template_id, template_str, occurrence])
# df_event = pd.DataFrame(df_events, columns=['EventId', 'EventTemplate', 'Occurrences'])
self.df_log['EventId'] = log_templateids
self.df_log['EventTemplate'] = log_templates
if self.keep_para:
self.df_log["ParameterList"] = self.df_log.apply(self.get_parameter_list, axis=1)
self.df_log.to_csv(os.path.join(self.save_path, self.log_name + '_structured.csv'), index=False)
occ_dict = dict(self.df_log['EventTemplate'].value_counts())
        df_event = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 19:30:38 2020
@author: aletu
"""
import numpy as np
import pandas as pd
import random
import datetime
def generateWarehouseData(num_SKUs = 100,
nodecode = 1,
idwh = ['LOGICAL_WH1', 'LOGICAL_WH2', 'FAKE'],
whsubarea = ['AREA 1'],
num_corsie = 5,
num_campate = 66,
num_livelli = 5,
alt_livello = 1200,
largh_campate = 800,
largh_corsia = 4000,
num_movements=1000,
num_ordercode = 800,
average_time_between_movements = 1/24, #days
first_day = datetime.datetime(year=2020, month=1, day = 2),
):
#% CLASS SKU
class SKU():
def __init__(self,itemcode):
self.ITEMCODE=itemcode
self.DESCRIPTION = f"PRODOTTO_{itemcode}"
self.VOLUME = np.random.uniform(0.1,100) #volume in dm3
            self.WEIGHT = np.random.uniform(0.1,10)  # weight in kg
#% CLASS STORAGE LOCATION
class STORAGE_LOCATION():
def __init__(self,nodecode, idwh, whsubarea, idlocation, loccodex, loccodey, loccodez, rack, bay, level):
self.NODECODE = nodecode
self.IDWH = idwh
self.WHSUBAREA = whsubarea
self.IDLOCATION=idlocation
self.LOCCODEX = loccodex
self.LOCCODEY = loccodey
self.LOCCODEZ = loccodez
self.RACK = rack
self.BAY = bay
self.LEVEL = level
# % CLASS MOVEMENTS
class MOVEMENTS():
def __init__(self, itemcode,volume,weight, nodecode, idwh, whsubarea, idlocation,rack, bay, level, loccodex, loccodey, loccodez,
ordercode, quantity, timestamp, inout, ordertype):
self.ITEMCODE=itemcode
self.NODECODE = nodecode
self.IDWH = idwh
self.WHSUBAREA = whsubarea
self.IDLOCATION=idlocation
self.RACK=rack
self.BAY=bay
self.LEVEL=level
self.LOCCODEX = loccodex
self.LOCCODEY = loccodey
self.LOCCODEZ = loccodez
self.ORDERCODE = ordercode
self.PICKINGLIST = ordercode
self.QUANTITY = quantity
self.VOLUME = volume*quantity
self.WEIGHT = weight*quantity
self.TIMESTAMP_IN = timestamp
self.INOUT = inout
self.ORDERTYPE = ordertype
# % CLASS INVENTORY
class INVENTORY():
def __init__(self,itemcode,nodecode, idwh, idlocation,quantity, timestamp):
self.NODECODE = nodecode
self.IDWH=idwh
self.ITEMCODE=itemcode
self.IDLOCATION = idlocation
self.QUANTITY = quantity
self.TIMESTAMP = timestamp
#% CREATE SKUS
dict_SKUs={}
itemcodes=np.arange(0,num_SKUs)
for itemcode in itemcodes:
dict_SKUs[itemcode] = SKU(itemcode)
# % CREATE WH LAYOUT
dict_locations ={}
idlocation=0
for corsia in range(0, num_corsie):
for campata in range(0, num_campate):
for livello in range(0,num_livelli):
idlocation=idlocation+1 #create a new location index
#save parameters
NODECODE = nodecode
IDWH = random.choice(idwh)
WHSUBAREA = random.choice(whsubarea)
IDLOCATION = idlocation
LOCCODEX = corsia*largh_corsia
LOCCODEY = campata*largh_campate
LOCCODEZ = livello*alt_livello
#create storage location
dict_locations[idlocation] = STORAGE_LOCATION(NODECODE,
IDWH,
WHSUBAREA,
IDLOCATION,
LOCCODEX,
LOCCODEY,
LOCCODEZ,
corsia,
campata,
livello)
# %% CREATE MOVEMENTS
dict_movements={}
num_creati = 0
ordercodes = np.arange(0,num_ordercode)
while num_creati < num_movements:
num_creati = num_creati+1
#random select sku
sku = random.choice(dict_SKUs)
itemcode = sku.ITEMCODE
volume = sku.VOLUME
weight = sku.WEIGHT
#random select storage location
loc_key = random.choice(list(dict_locations.keys()))
loc = dict_locations[loc_key]
nodecode = loc.NODECODE
idwh = loc.IDWH
whsubarea=loc.WHSUBAREA
idlocation=loc.IDLOCATION
loccodex = loc.LOCCODEX
loccodey = loc.LOCCODEY
loccodez = loc.LOCCODEZ
rack = loc.RACK
bay=loc.BAY
level = loc.LEVEL
#generates movements data
ordercode = random.choice(ordercodes)
quantity = np.random.lognormal(mean=2,sigma=1)
wait = np.random.exponential(average_time_between_movements)
if num_creati==1:
timestamp = first_day + datetime.timedelta(wait)
else:
timestamp = dict_movements[num_creati-1].TIMESTAMP_IN + datetime.timedelta(wait)
inout = random.choice(['+','-',' '])
ordertype = random.choice(['PICKING','PUTAWAY',' OTHER '])
dict_movements[num_creati] = MOVEMENTS(itemcode,volume,weight , nodecode, idwh, whsubarea, idlocation,rack, bay, level, loccodex, loccodey, loccodez,
ordercode, quantity, timestamp, inout, ordertype)
# %% CREATE INVENTORY
dict_inventory = {}
for itemcode in dict_SKUs:
#sku = dict_SKUs[itemcode]
loc_key = random.choice(list(dict_locations.keys()))
loc = dict_locations[loc_key]
nodecode=loc.NODECODE
idwh=loc.IDWH
idlocation=loc.IDLOCATION
quantity = np.random.lognormal(mean=2,sigma=1)
dict_inventory[itemcode] = INVENTORY(itemcode,nodecode, idwh, idlocation,quantity, first_day)
# %% SAVE LOCATIONS AND EXPORT
    D_locations = pd.DataFrame()
import os, datetime, pymongo, configparser
import pandas as pd
from bson import json_util
global_config = None
global_client = None
global_stocklist = None
def getConfig(root_path):
global global_config
if global_config is None:
#print("initial Config...")
global_config = configparser.ConfigParser()
global_config.read(root_path + "/" + "config.ini")
return global_config
def getClient():
global global_client
from pymongo import MongoClient
if global_client is None:
#print("initial DB Client...")
global_client = MongoClient('localhost', 27017)
return global_client
def getCollection(database, collection):
client = getClient()
db = client[database]
return db[collection]
def getStockList(root_path, database, sheet):
global global_stocklist
if global_stocklist is None:
#print("initial Stock List...")
global_stocklist = queryStockList(root_path, database, sheet)
return global_stocklist
def setStockList(df):
global global_stocklist
df.set_index('symbol', inplace=True)
global_stocklist = df
return global_stocklist
def readFromCollection(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find(queryString)
df = pd.DataFrame(list(result))
if df.empty == False: del df['_id']
return df
def writeToCollection(collection, df, id = None):
jsonStrings = df.to_json(orient='records')
bsonStrings = json_util.loads(jsonStrings)
for string in bsonStrings:
if id is not None:
id_string = ''.join([string[item] for item in id])
string['_id'] = id_string
collection.save(string)
def readFromCollectionExtend(collection, queryString=None):
if queryString is None:
result = collection.find()
else:
result = collection.find_one(queryString)
if result is None:
return pd.DataFrame(), {}
    return pd.read_json(result['data'], orient='records'), result['metadata']
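# Minimal usage sketch (assumes a local MongoDB on port 27017; database,
# collection, and column names below are illustrative only):
# coll = getCollection('stock_db', 'daily_prices')
# df_prices = readFromCollection(coll)
# writeToCollection(coll, df_prices, id=['symbol', 'date'])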
#!/home/admin/anaconda3/envs/TF/bin/ python3.5
# -*- coding: utf-8 -*-
'''
Created on 2018ๅนด6ๆ11ๆฅ
@author: <NAME>
Jiangxi university of finance and economics
'''
from pandas import DataFrame
from pandas import concat
import pandas as pd
time_ser_process=pd.read_csv('pricedetails_plus.csv')  # load the data to prepare for time-series processing
time_ser_process=time_ser_process.dropna(axis=0)
print(time_ser_process['citys'])
time_ser_process.to_csv('pricedetails_plus_1.csv')
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):  # time-series framing function
"""
Frame a time series as a supervised learning dataset.
Arguments:
data: Sequence of observations as a list or NumPy array.
n_in: Number of lag observations as input (X).
n_out: Number of observations as output (y).
dropnan: Boolean whether or not to drop rows with NaN values.
Returns:
Pandas DataFrame of series framed for supervised learning.
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg
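# Illustrative call on a toy univariate series (values made up):
# frame = series_to_supervised([x * 0.1 for x in range(10)], n_in=3, n_out=1)
# print(frame.head())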
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# pip install factor_analyzer
# In[2]:
#All the header files required for the code
import numpy as np
import pandas as pd
from factor_analyzer import FactorAnalyzer
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
import random
# In[3]:
#Importing both the file using pandas
data1 = pd.read_csv('movies data.csv')
data2 = pd.read_csv('ratings data.csv')
# In[4]:
#Deleting unnecessary columns
data1 = data1.drop('Unnamed: 0',axis = 1)
data2 = data2.drop(['Unnamed: 0','Timestamp'],axis = 1)
# In[5]:
data1.head()
# In[6]:
data2.head()
# In[7]:
#Merging both the dataframes
data = pd.merge(data2 , data1 , how='outer', on='MovieID')
# In[8]:
data.head()
# In[9]:
# Data Processing
# Converting Genres into different columns
# Here we just create columns and put there initial value as 0
x = data.Genres
a = list()
for i in x:
abc = i
a.append(abc.split('|'))
a = pd.DataFrame(a)
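# A hedged sketch of the intended one-hot step (approach assumed, not taken
# from the original notebook):
# genre_dummies = data.Genres.str.get_dummies(sep='|')
# data = pd.concat([data, genre_dummies], axis=1)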
# Filename: reference.py
"""
Data provided for free by IEX (https://iextrading.com/developer/).
See https://iextrading.com/api-exhibit-a/ for more information.
"""
from iex.base import _Base, IEXAPIError
import pandas as pd
class Reference(_Base):
"""https://iextrading.com/developer/docs/#reference-data"""
_ENDPOINT = '/ref-data/'
def get_symbols(self):
"""https://api.iextrading.com/1.0/ref-data/symbols"""
payload = 'symbols'
data = self._get_json(self._ENDPOINT, payload)
symbols = pd.DataFrame(data)
return(symbols)
def get_corporate_actions(self, date=None, sample=False):
"""https://iextrading.com/developer/docs/#iex-corporate-actions"""
if date is not None:
payload = ''.join(['daily-list/corporate-actions/', date])
elif sample is True:
payload = 'daily-list/corporate-actions/sample'
else:
payload = 'daily-list/corporate-actions'
data = self._get_json(self._ENDPOINT, payload)
corporate_actions = pd.DataFrame(data)
return(corporate_actions)
def get_dividends(self, date=None, sample=False):
"""https://iextrading.com/developer/docs/#iex-dividends"""
if date is not None:
payload = ''.join(['daily-list/dividends/', date])
elif sample is True:
payload = 'daily-list/dividends/sample'
else:
payload = 'daily-list/dividends'
data = self._get_json(self._ENDPOINT, payload)
dividends = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import sys
import tensorflow as tf
import json
import joblib
import time
from tensorflow import keras
from keras import optimizers
from datetime import datetime,timedelta
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
pd.set_option('display.max_columns', None)
#---------------------------------------
# variables
#---------------------------------------
start = time.time()
DATASET_NUM = 7
MODEL_NUM = 10
# path
PATH_BASE = './'
PATH_MODEL = PATH_BASE + '/model/'
PATH_RESULT = PATH_BASE + '/result/'
# power capacity
power_nm_list = ['onm1_h','onm2_h','onm3_h','onm4_h']
capacity_list = [89.7, 96.6, 90, 46.2]
RSRS_ID = 0
POWER_NM = power_nm_list[RSRS_ID]
CAPACITY = capacity_list[RSRS_ID]
print("POWER_NM:{}, CAPACITY:{}".format(POWER_NM,CAPACITY))
# timesteps
SHIFT_DAYS = 7
PRED_STEPS = 24
dataX_STEPS = SHIFT_DAYS*PRED_STEPS
#---------------------------------------
# functions
#---------------------------------------
# replace outlier readings with NaN
def power_anomal(x) :
if x > CAPACITY :
return np.nan
return x
def sensor_anomal(x) :
if x < -900 :
return np.nan
return x
# load sol omn
def load_power(POWER_NM):
df_power = pd.read_csv(PATH_BASE + '/df_power.csv',index_col=0)
df_power['POWER']=df_power['POWER'].apply(power_anomal).apply(lambda x:x)
df_power.sort_values(by=['DATE'], axis=0)
df_power = df_power.set_index( | pd.DatetimeIndex(df_power['DATE']) | pandas.DatetimeIndex |
"""
This notebook plots DFT results for thermoelectric properties of several
candidate materials identified via random forest regression and portfolio-like
risk management. See src/notebooks/screen/random_forest.py for details.
"""
# %%
import pandas as pd
from matplotlib import pyplot as plt
from thermo.utils import ROOT
DIR = ROOT + "/results/screen/dft/"
# %%
# convert excel file into usable format
zT_el_greedy_gurobi = pd.read_excel(DIR + "zT_el_greedy&gurobi.xlsx").dropna()
zT_el_greedy_gurobi.columns = range(len(zT_el_greedy_gurobi.columns))
# %%
# Get mask to distinguish string values in column 0.
m1 = | pd.to_numeric(zT_el_greedy_gurobi[0], errors="coerce") | pandas.to_numeric |
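# Hedged sketch of the masking idea used above (the demo frame is an assumption,
# not data from the notebook): values that fail numeric conversion become NaN,
# so isna() on the coerced column flags the rows that hold string labels.
demo = pd.DataFrame({0: ["Bi2Te3", 300, 400, "SnSe", 300]})
numeric = pd.to_numeric(demo[0], errors="coerce")
label_rows = numeric.isna()   # True where column 0 holds a material name
value_rows = ~label_rows      # True where column 0 holds a numeric entry
print(demo[label_rows], demo[value_rows], sep="\n")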
# from google.colab import drive
# drive.mount('/content/drive')
# !pip install shap
# !pip install pyitlib
# import os
# os.path.abspath(os.getcwd())
# os.chdir('/content/drive/My Drive/Protein project')
# os.path.abspath(os.getcwd())
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Created on Mar 1, 2020
@author: <NAME>
"""
from __future__ import division ###for float operation
import warnings
warnings.filterwarnings("ignore")
from collections import Counter
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score ##tp / (tp + fn)
from sklearn.metrics import precision_score #tp / (tp + fp)
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import KFold, StratifiedKFold
#from pyitlib import discrete_random_variable as drv
import time
import timeit
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted ### Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore)
#from sklearn.utils.multiclass import unique_labels, not necessary, can be replaced by array(list(set()))
#
class PUgenerator:
""" Generate PU data from fully labeled data set.
    Labelling assumption: SCAR, i.e., e(x) = P(s=1|y=1,x) = P(s=1|y=1) = c
Scenarios: case_control or single-training
"""
def __init__(self):
pass
def fit(self,X,y, case_control = True, n_L = None,n_U = None, c = None,n_T = None, random_state = 42, pos_label = '1'):
""" Implementation of a fitting function.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
n_L : Scalar
Number of labeled examples, for case_control = T
n_U : Scalar
Number of Unlabeled examples, for case_control = T
random_state : Scalar
set seed for Pu data generation
pos_label : default is '1'.
make other labels be '0'.
case_control : Bool
Case control scenario or single training data scenario
c : scalar
P(s=1|y=1), only needed when case_control = F
n_T : scalar
Sample size of single training dataset, for case_control = F
Returns
-------
self
"""
# print("just arrived in the fit function--",id(X))
X,y = check_X_y(X,y)
y = y.astype(str)
# f(x) = alpha f+(x) + (1-alpha) f-(x), true population
data = np.concatenate((X, y.reshape(-1,1)), axis=1)
np.random.seed(random_state*3) # set seed for random
np.random.shuffle(data) # save memory space than permutation, since no copy needed
# print("after check_X_y --",id(X))
n,p = X.shape
# f+(x)
X_1_true = data[data[:,-1] == pos_label ,0:p]
# case control scenario
if case_control:
# sample labeled positive from f+(x)
np.random.seed(random_state) # set seed for np.random
row_inx_L = np.random.choice(X_1_true.shape[0], n_L,replace=True)
X_labeled = X_1_true[row_inx_L,:]
# sample unlabeled X from f(x)
np.random.seed(random_state*2)
row_inx_U = np.random.choice(n, n_U,replace=True) # set seed for np.random
X_Unlabeled = data[row_inx_U,0:p]
y_Unlabeled = data[row_inx_U,-1]
y_Unlabeled = np.where(y_Unlabeled == pos_label,'1','0')
self.X_1abeled_ , self.X_Unlabeled_, self.prevalence_ ,self.X_true_, self.X_1_true_, self.p_, self.y_Unlabeled_ = X_labeled,X_Unlabeled, X_1_true.shape[0]/n, X, X_1_true, p, y_Unlabeled
else:
# sample single training data.
np.random.seed(random_state*2)
row_inx_Total = np.random.choice(n, n_T,replace=True) # set seed for np.random
data_T = data[row_inx_Total,:] # data_T is single training set
data_T_P = data_T[data_T[:,-1] == pos_label ,:]
data_T_N = data_T[data_T[:,-1] != pos_label ,:]
# sample positive labeled.
np.random.seed(random_state) # set seed for np.random
row_inx_L = np.random.choice(data_T_P.shape[0],int(data_T_P.shape[0]*c) ,replace=False)
data_T_P_L = data_T_P[row_inx_L,:]
data_T_P_U = data_T_P[list(set(range(data_T_P.shape[0])).difference(row_inx_L) ) ]
# Unlabeled = data_T_P_U + data_T_N
data_T_U = np.concatenate((data_T_P_U,data_T_N), axis = 0)
X_Unlabeled = data_T_U[:,0:p]
y_Unlabeled = data_T_U[:,-1]
y_Unlabeled = np.where(y_Unlabeled == pos_label,'1','0')
self.X_1abeled_, self.X_Unlabeled_, self.X_T_, self.prevalence_, self.X_true_, self.X_1_true_,self.p_, self.y_Unlabeled_ = data_T_P_L[:,0:p], X_Unlabeled, data_T[:,0:p], X_1_true.shape[0]/n, X ,X_1_true, p, y_Unlabeled
# X_T_
self.case_control_ = case_control
return self
def value_count(self):
# x_labeled vs x_1_true,
# x_T or x_Unlabeled vs X_true_
X_L = pd.DataFrame(self.X_1abeled_) # fl(x)
X_1_true = | pd.DataFrame(self.X_1_true_) | pandas.DataFrame |
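# Illustrative usage of PUgenerator in the case-control scenario. The toy data
# below is an assumption for demonstration only, not part of the original project.
if __name__ == "__main__":
    X_demo = np.random.rand(1000, 5)
    y_demo = np.random.choice(["0", "1"], size=1000, p=[0.7, 0.3])
    pu = PUgenerator()
    pu.fit(X_demo, y_demo, case_control=True, n_L=100, n_U=500, random_state=42)
    # prevalence of positives in the fully labeled data, plus PU sample shapes
    print(pu.prevalence_, pu.X_1abeled_.shape, pu.X_Unlabeled_.shape)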
from io import StringIO
import subprocess
import pandas as pd
import os
# Time columns in job records
# If we exclude PENDING jobs (that we do in slurm_raw_processing), all time columns should have a time stamp,
# except RUNNING jobs that do not have the 'End' stamp.
time_columns = ['Eligible','Submit','Start','End']
# Define what constitutes a duplicate job
duplicate_job_def = ['JobID','Submit','Start']
def sacct_jobs(account_query, d_from, d_to='', debugging=False,
serialize_frame='', slurm_names=False):
"""Ingest job record information from slurm via sacct and return DataFrame.
Parameters
-------
account_query: str
String query to be sent to sacct via -A flag.
d_from: date str
        Beginning of the query period, e.g. '2019-04-01T00:00:00'.
    d_to: date str, optional
        End of the query period; used as the end timestamp for jobs that are
        still running. Defaults to the empty string.
    debugging: boolean, optional
        Boolean for reporting progress to stdout. Default False.
serialize_frame: str, optional
Pickle the resulting DataFrame.
If empty, pickling is skipped. Defaults to the empty string.
slurm_names: str, optional
Keep slurm's sacct column names instead of shorthands.
Defaults to False.
Returns
-------
DataFrame
Returns a standard pandas DataFrame, or an empty dataframe if no
jobs are found.
"""
raw_frame = _get_slurm_records(pd.to_datetime(d_from))
out_frame = _slurm_raw_processing(raw_frame, slurm_names)
# Legacy/consistency check:
# Protect end time for jobs that are still currently running
out_frame['end'] = out_frame['end'].replace({pd.NaT: pd.to_datetime(d_to)})
# return _slurm_consistency_check(out_frame) if debugging else out_frame
# TODO: consisder swapping this to a better format
if serialize_frame != '':
out_frame.to_pickle(serialize_frame, protocol=4)
return out_frame
def _get_slurm_records(arg, ssh_client=None):
'''Retrieve records either via SSH or from a file.'''
sacct_format = 'Account,AllocCPUS,AllocNodes,AllocTRES,AssocID,Cluster,CPUTimeRAW,'\
'CPUTime,DerivedExitCode,ElapsedRaw,Elapsed,Eligible,End,ExitCode,Flags,GID,Group,'\
'JobID,JobIDRaw,NCPUS,NNodes,NodeList,Priority,Partition,QOS,QOSRAW,Reason,ReqCPUS,'\
'ReqMem,ReqNodes,ReqTRES,Reserved,ResvCPURAW,ResvCPU,Start,State,Submit,Suspended,'\
'SystemCPU,TimelimitRaw,Timelimit,TotalCPU,UID,User,UserCPU,WorkDir'
sacct_command = 'TZ=UTC sacct'
sacct_options = f'--duplicates --allusers --allocations --parsable2 --delimiter=";" --format={sacct_format}'
if isinstance(arg, str):
# Read a SLURM dump from a file
source = arg
command = None
if not os.path.isfile(source):
print('The seed file does not exist. Quitting.')
return pd.DataFrame()
elif isinstance(arg, list) and arg:
# Get specific jobs
command = f'{sacct_command} {sacct_options} --jobs {",".join(arg)}'
elif isinstance(arg, pd.Timestamp):
# Get a list of jobs in a date range
# Note that --start selects jobs in ANY state after the specified time.
# This is not the same as filtering by 'Start' afterwards.
command = f'{sacct_command} {sacct_options} --start {arg:%Y-%m-%dT%H:%M} --end Now\n'
else:
print('Unexpected input parameter to get_slurm_records().')
return pd.DataFrame()
if command:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
source = stdout.decode('UTF-8')
try:
records = pd.read_csv(StringIO(source), sep=';', dtype='str', on_bad_lines='skip')
    except Exception: # TODO: Fix this to be less heavy handed
return pd.DataFrame()
return | pd.DataFrame() | pandas.DataFrame |
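# Hedged usage sketch for sacct_jobs; the account name and date range below are
# placeholders, not values from any real deployment.
if __name__ == '__main__':
    jobs = sacct_jobs('def-examplegroup', '2019-04-01T00:00:00',
                      d_to='2019-05-01T00:00:00', debugging=True)
    print(f'{len(jobs)} job records returned for the requested window')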
from __future__ import division
import os, gc, copy, json
import pandas as pd
import numpy as np
import ipywidgets as widgets
from IPython.display import display, clear_output
import matplotlib as mpl
import matplotlib.pyplot as plt#, mpld3
import seaborn as sns
import warnings
#mpld3 hack
# class NumpyEncoder(json.JSONEncoder):
# def default(self, obj):
# import numpy as np
# if isinstance(obj, np.ndarray):
# return obj.tolist()
# return json.JSONEncoder.default(self, obj)
# from mpld3 import _display
# _display.NumpyEncoder = NumpyEncoder
filled_markers = ('o', 'X', 's', 'P', 'D', '^', '8', 'p', '*', 'h', 'H', '>', 'd', '<', 'v', 'd', 'o', 'X', 's', 'P', 'D', '^', '8', 'p', '*', 'h', 'H', '>', 'd', '<', 'v', 'd')
class Plotter(object):
def __init__(self):
self.data = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2021, <NAME>.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor <NAME> and engineer <NAME> and with the support of engineer <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import configparser # implements a basic configuration language for Python programs
import json # json encoder and decoder
import os # provides a portable way of using operating system dependent functionality
import sys # system-specific parameters and functions
import tempfile # used to create temporary files and directories
import time # provides various time-related functions
from copy import deepcopy # creates a new object and recursively copies the original object elements
import baker # easy, powerful access to Python functions from the command line
import mlflow # open source platform for managing the end-to-end machine learning lifecycle
import numpy as np # the fundamental package for scientific computing with Python
import pandas as pd # pandas is a flexible and easy to use open source data analysis and manipulation tool
import psutil # used for retrieving information on running processes and system utilization
import torch # tensor library like NumPy, with strong GPU support
from logzero import logger # robust and effective logging for Python
from nets.Contrastive_Model_net import Net
from nets.generators.fresh_generators import get_generator
from utils.ranking_metrics import (mean_reciprocal_rank, mean_average_precision,
max_reciprocal_rank, min_reciprocal_rank,
max_average_precision, min_average_precision)
# get config file path
model_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(model_dir)
config_filepath = os.path.join(src_dir, 'config.ini')
# instantiate config parser and read config file
config = configparser.ConfigParser()
config.read(config_filepath)
# get variables from config file
device = config['general']['device']
N_SAMPLES = int(config['sorel20mDataset']['test_n_samples'])
try:
# try getting layer sizes from config file
layer_sizes = json.loads(config['mtje']['layer_sizes'])
except json.JSONDecodeError:
# if the option is not present in the config file set layer sizes to None
layer_sizes = None
# instantiate run additional parameters dict setting values got from config file
run_additional_params = {
'layer_sizes': layer_sizes,
'dropout_p': float(config['mtje']['dropout_p']),
'activation_function': config['mtje']['activation_function'],
'normalization_function': config['mtje']['normalization_function'],
'optimizer': config['contrastiveLearning']['optimizer'],
'lr': float(config['contrastiveLearning']['lr']),
'momentum': float(config['contrastiveLearning']['momentum']),
'weight_decay': float(config['contrastiveLearning']['weight_decay']),
'hard': int(config['contrastiveLearning']['hard']),
'margin': float(config['contrastiveLearning']['margin']),
'squared': int(config['contrastiveLearning']['squared'])
}
def compute_ranking_scores(rank_per_query): # list of ranks computed by the model evaluation procedure
""" Compute ranking scores (MRR and MAP) and a bunch of interesting ranks to save to file from a list of ranks.
Args:
rank_per_query: List of ranks computed by the model evaluation procedure
Returns:
ranking scores (in a dict) and a dict of interesting ranks to save to file.
"""
# compute binarized (0/1) relevance scores
rs = [np.asarray([i == rank['ground_truth_label'] for i in rank['rank_labels']], dtype=np.dtype(int))
for rank in rank_per_query]
# compute and log MRR and MAP scores
ranking_scores = {'MRR': mean_reciprocal_rank(rs), 'MAP': mean_average_precision(rs)}
# compute a bunch of indexes for interesting queries to save in csv files as examples
max_rr, max_rr_idx = max_reciprocal_rank(rs)
min_rr, min_rr_idx = min_reciprocal_rank(rs)
max_ap, max_ap_idx = max_average_precision(rs)
min_ap, min_ap_idx = min_average_precision(rs)
# save indexes (and values) just computed to a dict
queries_indexes = {
'max_rr': {'value': max_rr, 'index': max_rr_idx},
'min_rr': {'value': min_rr, 'index': min_rr_idx},
'max_ap': {'value': max_ap, 'index': max_ap_idx},
'min_ap': {'value': min_ap, 'index': min_ap_idx}
}
# get interesting queries
ranks_to_save = {
key: {
'value': scores['value'],
'rank': rank_per_query[scores['index']]
}
for key, scores in queries_indexes.items()
}
# return computed scores and interesting queries
return ranking_scores, ranks_to_save
def normalize_results(labels,
predictions):
""" Normalize results to make them easier to be saved to file.
Args:
labels: Array-like (tensor or numpy array) object containing the ground truth labels
predictions: Array-like (tensor or numpy array) object containing the model predictions
Returns:
Dictionary containing the normalized labels and predictions tensors.
"""
# initialize the result value dict detaching and copying the labels tensor
rv = {
'label': Net.detach_and_copy_array(labels)
}
# for each prediction in the 'predictions' dict
for k, v in predictions.items():
# save into the return value dict the current model prediction tensor after having detached and copied it
rv['{}-NN_pred'.format(k)] = Net.detach_and_copy_array(v)
# return 'return value'
return rv
@baker.command
def evaluate_network(fresh_ds_path, # path of the directory where to find the fresh dataset (containing .dat files)
checkpoint_path, # path to the model checkpoint to load
training_run=0, # training run identifier
train_split_proportion=7, # train subsplit proportion value
valid_split_proportion=1, # validation subsplit proportion value
test_split_proportion=2, # test subsplit proportion value
batch_size=250, # how many samples per batch to load
rank_size=20, # size (number of samples) of the ranking to produce
knn_k_min=1, # minimum value of k to use when applying the k-nn algorithm
knn_k_max=11, # maximum value of k to use when applying the k-nn algorithm
# if provided, seed random number generation with this value (default: None, no seeding)
random_seed=None,
# how many worker (threads) the dataloader uses (default: 0 -> use multiprocessing.cpu_count())
workers=0):
""" Evaluate the model on both the family prediction task and on the family ranking task.
Args:
fresh_ds_path: Path of the directory where to find the fresh dataset (containing .dat files)
checkpoint_path: Path to the model checkpoint to load
training_run: Training run identifier (default: 0)
train_split_proportion: Train subsplit proportion value (default: 7)
valid_split_proportion: Validation subsplit proportion value (default: 1)
test_split_proportion: Test subsplit proportion value (default: 2)
batch_size: How many samples per batch to load (default: 250)
rank_size: Size (number of samples) of the ranking to produce (default: 20)
knn_k_min: Minimum value of k to use when applying the k-nn algorithm (default: 1)
knn_k_max: Maximum value of k to use when applying the k-nn algorithm (default: 11)
random_seed: If provided, seed random number generation with this value (default: None, no seeding)
workers: How many worker (threads) the dataloader uses (default: 0 -> use multiprocessing.cpu_count())
"""
# start mlflow run
with mlflow.start_run() as mlrun:
# if the split proportions are not as expected raise ValueError
if train_split_proportion <= 0 or valid_split_proportion <= 0 or test_split_proportion <= 0:
raise ValueError('train, valid and test split proportions must be positive integers.')
# the rank size must be smaller (or at most equal) to the selected batch size
if rank_size > batch_size:
raise ValueError('rank size should be smaller or equal to the batch size.')
# generate the dataset split proportions (list)
dataset_split_proportions = [train_split_proportion, valid_split_proportion, test_split_proportion]
# if workers has a value (it is not None) then convert it to int if it is > 0, otherwise set it to None
workers = workers if workers is None else int(workers) if int(workers) > 0 else None
if random_seed is not None: # if a seed was provided
logger.info(f"Setting random seed to {int(random_seed)}.")
# set the seed for generating random numbers
torch.manual_seed(int(random_seed))
logger.info('...instantiating siamese network for contrastive evaluation run n. {}'.format(training_run))
# create fresh dataset generators
train_generator, _, test_generator = get_generator(ds_root=fresh_ds_path,
splits=dataset_split_proportions,
batch_size=batch_size,
return_shas=True,
num_workers=workers,
shuffle=True) # shuffle samples
# get label to signature function from the test dataset (used to convert numerical labels to family names)
label_to_sig = test_generator.dataset.label_to_sig
# get total number of families
n_families = test_generator.dataset.n_families
# create contrastive (siamese) mtjeNet model
model = Net(feature_dimension=2381,
embedding_dimension=32,
layer_sizes=run_additional_params['layer_sizes'],
dropout_p=run_additional_params['dropout_p'],
activation_function=run_additional_params['activation_function'],
normalization_function=run_additional_params['normalization_function'])
# load model checkpoint
model.load_state_dict(torch.load(checkpoint_path))
# allocate model to selected device (CPU or GPU)
model.to(device)
logger.info('Evaluating contrastive learning model..')
# set model into evaluation mode
model.eval()
# get number of steps per epoch (# of total batches) from test generator
test_steps_per_epoch = len(test_generator)
# get number of steps per epoch (# of total batches) from train generator
train_steps_per_epoch = len(train_generator)
# create temporary directory
with tempfile.TemporaryDirectory() as tempdir:
# compute result file path
filename = os.path.join(tempdir, 'results.csv')
# create and open the results file in write mode
with open(filename, 'w') as f:
first_batch = True
ranks = []
# set current epoch start time
start_time = time.time()
# for all mini-batches of samples of the test generator
for i, (query_shas, query_features, query_labels) in enumerate(test_generator):
# get the query samples shas
query_shas = np.asarray(query_shas)
# transfer query features and labels to selected device
query_features = deepcopy(query_features).to(device)
query_labels = deepcopy(query_labels.long()).to(device)
with torch.no_grad(): # disable gradient calculation
# perform a forward pass through the network to get the query samples embeddings
query_pe_embeddings = model(query_features)
# initialize top samples arrays with null value
top_shas = None
top_labels = None
top_distances = None
predictions = {}
# for all mini-batches of data from the train generator
for j, (anchor_shas, anchor_features, anchor_labels) in enumerate(train_generator):
# get the anchor samples shas
anchor_shas = np.asarray(anchor_shas)
# transfer anchor features to selected device
anchor_features = anchor_features.to(device)
with torch.no_grad(): # disable gradient calculation
# perform a forward pass through the network to get the anchor embeddings
anchor_pe_embeddings = model(anchor_features)
# compute euclidean distances between query and anchor samples
distances = torch.cdist(query_pe_embeddings, anchor_pe_embeddings, p=2.0)
# if top distances is none
if top_distances is None:
# assign to top distances the current computed distances
top_distances = distances
# compute array of indices which sort the distances in ascending order
indices = top_distances.argsort(dim=1)
# obtain the shas of the first 'rank_size' most similar query samples
# (based on the computed indices)
top_shas = np.concatenate([np.expand_dims(
np.repeat(np.expand_dims(anchor_shas, axis=0), query_shas.shape[0], axis=0)[x, y],
axis=0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).reshape(-1, rank_size)
# obtain the labels of the first 'rank_size' most similar query samples
# (based on the computed indices)
top_labels = torch.cat([anchor_labels.repeat(query_labels.shape[0], 1)[x, y].unsqueeze(0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).view(-1, rank_size)
# obtain the distances of the first 'rank_size' most similar query samples to the current
# anchor (based on the computed indices)
top_distances = torch.cat([top_distances[x, y].unsqueeze(0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).view(-1, rank_size)
else:
# concatenate the current shas to the top shas array
top_shas = np.concatenate((top_shas, np.repeat(np.expand_dims(anchor_shas, axis=0),
top_shas.shape[0], axis=0)), axis=1)
# concatenate the current labels to the top labels tensor
top_labels = torch.cat((top_labels, anchor_labels.repeat(top_labels.size()[0], 1)), dim=1)
# concatenate the current distances to the top distances tensor
top_distances = torch.cat((top_distances, distances), dim=1)
# compute array of indices which sort the distances in ascending order
indices = top_distances.argsort(dim=1)
# obtain the labels of the first 'rank_size' most similar query samples
# (based on the computed indices)
top_shas = np.concatenate([np.expand_dims(top_shas[x, y], axis=0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).reshape(-1, rank_size)
# obtain the labels of the first 'rank_size' most similar query samples
# (based on the computed indices)
top_labels = torch.cat([top_labels[x, y].unsqueeze(0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).view(-1, rank_size)
# obtain the distances of the first 'rank_size' most similar query samples to the current
# anchor (based on the computed indices)
top_distances = torch.cat([top_distances[x, y].unsqueeze(0)
for x, row in enumerate(indices[:, :rank_size])
for y in row]).view(-1, rank_size)
# for all query samples
for k, s in enumerate(query_shas):
# save ranking
ranks.append({
'query_sha': s,
'ground_truth_label': int(query_labels[k].item()),
'ground_truth_family': label_to_sig(int(query_labels[k].item())),
'rank_shas': top_shas[k].tolist(),
'rank_labels': [int(lab.item()) for lab in top_labels[k]],
'rank_families': [label_to_sig(int(lab.item())) for lab in top_labels[k]]
})
# for all odd values of k from knn_k_min to knn_k_max (included)
for k in range(knn_k_min if knn_k_min % 2 else knn_k_min + 1, knn_k_max + 1, 2):
# get the first k labels from the top labels tensor
knn_labels = top_labels[:, :k]
# get the first k distances from the top distances tensor and raise them to the power of -2
knn_weights = torch.pow(top_distances[:, :k], -2)
# initialize per family-scores to 0
knn_scores = torch.zeros((knn_labels.shape[0], n_families)).to(device)
# for all top k labels
for idx, labs in enumerate(knn_labels):
# compute the per-family sum of distance weights
knn_scores[idx].index_add_(0, torch.tensor([int(lab.item()) for lab in labs]).to(device),
knn_weights[idx])
# save as prediction the family with the maximum score
predictions[str(k)] = torch.argmax(knn_scores, dim=1)
# compute current epoch elapsed time (in seconds)
elapsed_time = time.time() - start_time
# write on standard out the elapsed time, predicted total epoch completion time, current mean speed
# and main memory usage
sys.stdout.write('\r Contrastive learning evaluation: {}/{} '.format(i + 1, test_steps_per_epoch)
+ '[{}/{}, {:6.3f}it/s, RAM used: {:4.1f}%] '
.format(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)), # show elapsed time
time.strftime("%H:%M:%S", # predict total epoch completion time
time.gmtime(test_steps_per_epoch * elapsed_time / (i + 1))),
(i + 1) / elapsed_time, # compute current mean speed (it/s)
psutil.virtual_memory().percent)) # get percentage of main memory used
# flush standard output
sys.stdout.flush()
# normalize the results
results = normalize_results(query_labels, predictions)
# store results into a pandas dataframe (indexed by the sha265 keys) and then save it as csv into
# file f (inserting the header only if this is the first batch in the loop)
| pd.DataFrame(results, index=query_shas) | pandas.DataFrame |
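# Intuition-only sketch of the mean reciprocal rank metric logged above. The
# project imports its own implementation from utils.ranking_metrics; this
# stand-alone version exists purely to illustrate the computation.
def _mrr_sketch(relevance_lists):
    # each element is a 0/1 array ordered by predicted similarity (as in `rs`)
    reciprocal_ranks = []
    for rel in relevance_lists:
        hits = np.flatnonzero(rel)
        reciprocal_ranks.append(1.0 / (hits[0] + 1) if hits.size else 0.0)
    return float(np.mean(reciprocal_ranks))

print(_mrr_sketch([np.array([0, 1, 0]), np.array([1, 0, 0]), np.array([0, 0, 0])]))  # -> (1/2 + 1 + 0) / 3 = 0.5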
import os
import re
import copy
import json
import tqdm
import pprint
import sklearn
import pandas as pd
from sklearn.model_selection import train_test_split
for lib in ["emoji", "fasttext", "google_trans_new"]:
try:
exec(f"import {lib}")
    except ImportError:
        os.system(f"pip install {lib}")
        exec(f"import {lib}")
from google_trans_new import google_translator
DATASET_PATH = "raw_data/Development Data/dev_data_article.xlsx"
TRANSLATED_PATH = "/raw_data/Translated Data/"
PROCESSED_PATH = "raw_data/Processed Data"
TRANSLATE = False
STATS_PATH = "stats.json"
TRANSLATOR = google_translator()
dataset = pd.read_excel(DATASET_PATH)
trans_dataset = copy.deepcopy(dataset).to_dict("records")
articles = list(dataset['Text'])
headlines = list(dataset['Headline'])
def lower(text):
return text.lower()
def remove_urls(text):
    # placeholder: URL stripping is not implemented yet; returns text unchanged
    return text
def remove_punctuation(text):
result = ""
for letter in text:
if letter not in '''".',:`;''':
result += letter
return result
def identity(text):
return text
VALID_FILTERS = {"lower":lower, "remove_punctuation":remove_punctuation}
class ProcessText:
@classmethod
def Sequential(cls, filts):
obj = cls()
obj.filts = []
# c.valid_filts = VALID_FILTERS
for filt in filts:
obj.filts.append(obj.valid_filts.get(filt, identity))
return obj
def __init__(self):
self.filts = []
self.valid_filts = VALID_FILTERS
def add(self, filt):
self.filts.append(filt)
def run(self, text):
for filt in self.filts:
text = filt(text)
return text
text_processor = ProcessText.Sequential(["strip",
"remove_urls",
"remove_punctuation"])
stats = {"total_lines":0,
"total_headlines":0,
"max_lines":0,
"min_lines":100000,
"avg_lines":0,
"max_line_len":0,
"min_line_len":100000,
"avg_line_len":0,
"en_lines":0,
"hi_lines":0,
"eh_lines":0,
"max_headline_len":0,
"min_headline_len":100000,
"avg_headline_len":0,
"en_headlines":0,
"hi_headlines":0,
"eh_headlines":0}
try:
open('/home/atharva/lid.176.bin', "rb")
except FileNotFoundError:
os.system("wget -O /home/atharva/lid.176.bin https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin")
PRETRAINED_MODEL_PATH = '/home/atharva/lid.176.bin'
model = fasttext.load_model(PRETRAINED_MODEL_PATH)
def translate(text):
return TRANSLATOR.translate(text, lang_tgt='en').strip()
if True:
total_lines = 0
total_words = 0
for j,article in enumerate(tqdm.tqdm(articles)):
article_lines = 0
proc_article = ''
for i,para in enumerate(article.split("\n")):
for line in para.split("."):
line = line.strip().strip("\n")
if line != "":
article_lines += 1
proc_article += text_processor.run(line) + ' . '
num_words = len(line.split())
lang = model.predict([line])[0][0][0].split("__")[-1]
if lang == 'hi':
stats["hi_lines"] += 1
if TRANSLATE:
line = translate(line)
elif lang == 'en':
stats["en_lines"] += 1
else:
stats["eh_lines"] += 1
if TRANSLATE:
line = translate(line)
total_words += num_words
stats["max_line_len"] = max(num_words, stats["max_line_len"])
stats["min_line_len"] = min(num_words, stats["min_line_len"])
total_lines += article_lines
stats["max_lines"] = max(article_lines, stats["max_lines"])
stats["min_lines"] = min(article_lines, stats["min_lines"])
proc_article = proc_article.strip().strip("\n")
articles[j] = proc_article.replace("\n", "")
stats["total_lines"] = total_lines
stats["avg_line_len"] = total_words/total_lines
stats["avg_lines"] = total_lines/len(articles)
total_headlines = 0
total_headline_words = 0
for i,headline in enumerate(tqdm.tqdm(headlines)):
headlines[i] = text_processor.run(headline)
    lang = model.predict([headlines[i]])[0][0][0].split("__")[-1]
num_words = len(headlines[i].split())
total_headline_words += num_words
total_headlines += 1
if lang == "en":
stats["en_headlines"] += 1
elif lang == "hi":
if TRANSLATE:
headlines[i] = translate(headlines[i])
stats["hi_headlines"] += 1
else:
if TRANSLATE:
headlines[i] = translate(headlines[i])
stats["eh_headlines"] += 1
stats["max_headline_len"] = max(stats["max_headline_len"], num_words)
stats["min_headline_len"] = min(stats["min_headline_len"], num_words)
stats["total_headlines"] = total_headlines
stats["avg_headline_len"] = total_headline_words/total_headlines
pprint.pprint(stats)
json.dump(stats, open(STATS_PATH, "w"))
proc = []
for headline, article in zip(headlines, articles):
proc.append({"text":article, "summary":headline})
train, test = train_test_split(proc, test_size=0.1)
test = pd.DataFrame(test)
train = | pd.DataFrame(train) | pandas.DataFrame |
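# Hedged follow-up sketch: persisting the splits for a downstream summarization
# trainer. The file names are placeholders, not part of the original pipeline.
train.to_csv(os.path.join(PROCESSED_PATH, "train.csv"), index=False)
test.to_csv(os.path.join(PROCESSED_PATH, "test.csv"), index=False)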
import os
import sys
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from joblib import Memory
matplotlib.use('TkAgg')
# Auto-detect terminal width.
pd.options.display.width = None
pd.options.display.max_rows = 1000
pd.options.display.max_colwidth = 200
# Initialize a persistent memcache.
mem_hist = Memory(cachedir='./.cached_plot_hist', verbose=0)
mem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)
PRINT_BASELINE = True
PRINT_DELTA_ONLY = True
BETWEEN_START = pd.to_datetime('09:30').time()
BETWEEN_END = | pd.to_datetime('09:30:00.000001') | pandas.to_datetime |
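# Hedged example of how the persistent memcaches above are typically used: wrap
# an expensive loader with Memory.cache so repeat calls are served from disk.
# The loader below is a placeholder, not a function from the original script.
@mem_hist.cache
def load_history(symbol, date):
    # ...expensive query/parse work would normally happen here...
    return pd.DataFrame({'symbol': [symbol], 'date': [date]})

hist = load_history('IBM', '2019-06-03')   # computed once, then read from cache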
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function, division
from builtins import input
import parse_log as pl
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from importlib import reload
reload(pl)
#%%
def choose(prompt, choices=None, default=0):
"""Prompt the user to make a choice.
Args:
prompt: The prompt to show
choices: Iterable of tuples of choices. Each tuple represents a choice, and is
in the form (one letter, help, value) or (one letter, help). If value is missing,
it defaults to the letter.
The default choices are [('y', 'yes', True), ('n', 'no', False)]
default: the index of the default choice. Defaults to 0
Returns:
the associated value of the choice the user has made.
"""
# Handle default arguments
if choices is None:
choices = [('y', 'yes', True), ('n', 'no', False)]
# validate arguments
if not choices:
raise ValueError('Empty choices')
if default < 0 or default >= len(choices):
raise IndexError(f'Default index should be within [0, {len(choices)}), got: {default}')
def parse_choice(ch):
if len(ch) == 2:
return ch[0].lower(), ch[1], ch[0]
elif len(ch) == 3:
return ch[0].lower(), ch[1], ch[2]
else:
            raise ValueError(f'Invalid choice in choices: {ch}')
choices = [parse_choice(c) for c in choices]
# form choices string
choices_str = '/'.join(ch[0] if idx != default else ch[0].upper()
for idx, ch in enumerate(choices))
prompt = f'{prompt} [{choices_str}]: '
def_resp = choices[default][0]
while True:
resp = input(prompt)
if not resp:
resp = def_resp
resp = resp.lower()
for ch, _, value in choices:
if resp == ch:
return value
# Invalid input, print help
print(f'Invalid response: {resp}')
print('Accepted responses are:')
for ch, h, _ in choices:
print(f'{ch} - {h}')
def confirm(prompt, default=False, yes_choice='y', no_choice='n'):
"""Prompt for user's confirmation on some operation.
Returns:
True if the user confirmed, False otherwise.
"""
return choose(prompt, choices=[(yes_choice, 'yes', True), (no_choice, 'no', False)], default=0 if default else 1)
def select_steps(df):
# count unique numbers
counts = df.groupby('step').agg({c: 'nunique' for c in ['kernel', 'op']}).reset_index()
ss = counts.query('step > 10 & kernel > 10 & op > 200')
# so the step list is
if len(ss) > 1:
# drop first iteration
return ss.step.astype(int).tolist()[1:]
else:
slist = []
# nothing we can find programmatically, let the user decide
for _, s, ker, op in counts.itertuples():
if confirm('Step {} has {} tasks, with {} kernels, select?'.format(s, op, ker)):
slist.append(s)
return slist
def only_step(steps, idx):
ss = steps.step.sort_values().unique().tolist()
if idx >= len(ss):
idx = len(ss) - 1
return steps[steps.step == ss[idx]]
def only_steps(steps, idxs):
ss = steps.step.sort_values().unique().tolist()
return steps[steps.step.isin([ss[idx] for idx in idxs])]
def unify_names(*dfs):
# make sure names map to same idx
names = pd.concat([df.name for df in dfs]).unique()
ndf = pd.DataFrame({'name': names})
ndf.index.rename('nameid', inplace=True)
ndf = ndf.reset_index()
res = [df.drop('nameid', axis=1, errors='ignore').merge(ndf) for df in dfs]
if len(res) == 1:
return res[0]
else:
return res
#
# First load log from tensorflow
#
tf_events = ['task_ready', 'task_start', 'task_done']
def load_tf(path):
logs = pl.load_file(path)
df = pd.DataFrame(l.__dict__ for l in logs)
df = df[df.type.isin(tf_events)].drop(['level','loc', 'entry_type'], axis=1)
# make sure step is int
df['step'] = df.step.astype(int)
ss = select_steps(df)
step25 = df[df.step.isin(ss)]
# discard some internal or async op: _SOURCE, _Recv, _Send
ignored = ['_Recv', '_Send']
step25 = step25[~step25.kernel.isin(ignored)]
step25 = step25[step25.op != '_SOURCE']
steptf = step25.pivot_table(values='timestamp', index=['step', 'op', 'kernel'],
columns='type', aggfunc='first').reset_index()
# add a name column
def name(row):
return '{}[{}]'.format(row['op'], row['kernel'])
steptf['name'] = steptf.apply(name, axis=1).values
# reorder
steptf = steptf[['step', 'name', 'op', 'kernel'] + tf_events]
return steptf.sort_values(by=tf_events).reset_index(drop=True)
#
# Second load log from salus
#
salus_events = [
'queued', # entering queue
'inspected', # submitted by scheduler
'prealloced', # preallocated
'running', # start running in thread pool
'afterDevCtx', # get devctx
'afterPrepInput', # after prepare input
'afterCompute', # after op->Compute
'afterClearInput', # after clear input
'afterPropOut', # after prop output
#'failed', # failed
'done', # finally
]
def load_salus(path, filter_step=True):
logs = pl.load_file(path)
df = pd.DataFrame(l.__dict__ for l in logs)
df = df[df.type == 'optracing_evt']
df = df.drop(['entry_type','level','loc', 'thread', 'type'], axis=1)
# make sure step is int
df['step'] = df.step.astype(int)
if filter_step:
ss = select_steps(df)
step25 = df[df.step.isin(ss)]
else:
step25 = df
# discard some internal or async op: _SOURCE, _Recv, _Send
ignored = ['_Recv', '_Send']
step25 = step25[~step25.kernel.isin(ignored)]
step25 = step25[step25.op != '_SOURCE']
# discard unneeded event
step25 = step25[step25.evt != 'scheduled']
# convert evt values to columns
step = step25.pivot_table(values='timestamp',
index=['step', 'op', 'kernel', 'sess'],
columns='evt', aggfunc='last').reset_index()
# add a name column
def name(row):
return '{}[{}]'.format(row['op'], row['kernel'])
step['name'] = step.apply(name, axis=1).values
# reorder
step = step[['sess', 'step', 'name', 'op', 'kernel'] + salus_events]
# sort
return step.sort_values(by=salus_events).reset_index(drop=True)
#
# Draw hlines
#
def draw_lines(ax, step, checkpoints, colors=['g', 'y', 'r'], offset=None,
labels=None, set_y=True, sort=False):
"""
    step is a pd.DataFrame that contains the columns:
timestamp, op, kernel, task_ready, task_start, task_done
"""
# sort first
if sort:
step = unify_names(step.sort_values(by=checkpoints))
# columns as unix timestamp in us
columns = [step[c].astype(np.int64) // 10**3 for c in checkpoints]
# with offset subtracted
if offset is None:
offset = np.min([np.min(col) for col in columns])
columns = [col - offset for col in columns]
if labels is None:
labels = [''] * len(colors)
for st, ed, c, l in zip(columns, columns[1:], colors, labels):
ax.hlines(y=step.nameid, xmin=st, xmax=ed, color=c, label=l)
# put name on yaxis
if set_y:
ax.set_yticks(step.nameid)
ax.set_yticklabels(step.name)
return ax, offset
def draw_lines2(ax, step, checkpoints, colors=['g', 'y', 'r'], offset=None,
labels=None, set_y=True, sort=False):
"""
    step is a pd.DataFrame that contains the columns:
timestamp, op, kernel, task_ready, task_start, task_done
"""
# sort first
if sort:
step = unify_names(step.sort_values(by=checkpoints))
# with offset subtracted
if offset is None:
offset = step[checkpoints].min().min()
columns = [step[col] - offset for col in checkpoints]
if labels is None:
labels = [''] * len(colors)
for st, ed, c, l in zip(columns, columns[1:], colors, labels):
ax.hlines(y=step.nameid, xmin=st, xmax=ed, color=c, label=l)
# put name on yaxis
if set_y:
ax.set_yticks(step.nameid)
ax.set_yticklabels(step.name)
return ax, offset
def draw_tf(ax, df, **kwargs):
draw_lines(ax, df, tf_events, colors=['g', 'r'], **kwargs)
def draw_salus(ax, df, **kwargs):
return draw_lines2(ax, df, [e for e in salus_events if e != 'failed'],
colors=plt.rcParams['axes.prop_cycle'].by_key()['color'],
labels=['Queuing', 'Prealloc', 'TPWait', 'DevCtx',
'PrepInput', 'Compute', 'ClrInput',
'PropOut', 'Misc'],
**kwargs)
sns.set_style("dark")
#plt.ioff()
#%% def main
def main():
#%%
#
# Set paths
#
model = 'alexnet_25'
logdir = 'logs/optracing/'
outputdir = '/home/peifeng/desktop/'
figsize = (40, 70)
set_y = True
#%%
#logdir = 'logs/optracing/'
#outputdir = '/home/peifeng/sync/M.S.Study/Research/salus/groupmeeting/20180403/weekend'
#figsize = None
#set_y = False
#%% Load data
steptf = load_tf(os.path.join(logdir, 'tf/{}.tf.10iter.0.output'.format(model)))
stepsalus = load_salus(os.path.join(logdir, 'salus/1/perf.output'))
twosess = load_salus(os.path.join(logdir, 'salus/2/perf.output'))
steptf = unify_names(steptf)
stepsalus = unify_names(stepsalus)
twosess = unify_names(twosess)
#%%
#
# Running one
#
fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=figsize)
draw_lines(axs[0], steptf, tf_events, colors=['g', 'r'], set_y=set_y)
axs[0].set_title('alexnet_25 on TF')
# load a few iters
draw_lines(axs[1], stepsalus, salus_events,
colors=plt.rcParams['axes.prop_cycle'].by_key()['color'],
labels=['Queuing', 'Prealloc', 'TPWait', 'DevCtx',
'PrepInput', 'Compute', 'ClrInput',
'PropOut', 'Misc'],
set_y=set_y)
axs[1].legend()
axs[1].set_title('alexnet_25 on Salus')
axs[1].set_xlabel('Normalized time (us)')
fig.tight_layout()
fig.savefig(os.path.join(outputdir, 'tfsalus.pdf'), dpi=300)
plt.close(fig)
#%%
#
# Running one with matching iter
#
#fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=figsize)
fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True)
draw_lines(axs[0], only_step(steptf, 6), tf_events, colors=['g', 'r'], set_y=set_y)
axs[0].set_title('alexnet_25 on TensorFlow')
# use second normal iter
draw_lines(axs[1], only_step(stepsalus, 10), salus_events,
colors=plt.rcParams['axes.prop_cycle'].by_key()['color'],
labels=['Queuing', 'Prealloc', 'TPWait', 'DevCtx',
'PrepInput', 'Compute', 'ClrInput',
'PropOut', 'Misc'],
set_y=set_y, sort=True)
axs[1].legend(loc='lower right', bbox_to_anchor=(1,1.5), ncol=2)
axs[1].set_title('alexnet_25 on Salus')
axs[1].set_xlabel('Normalized time (us)')
axs[1].set_ylabel('Tasks')
axs[1].set_xlim(0, 40000)
fig.tight_layout()
fig.savefig(os.path.join(outputdir, 'tfsalus-later.pdf'), dpi=300)
#plt.close(fig)
#%%
#
# Running two
#
def split_sess(twosess):
sessA, sessB = twosess.sess.unique()
alexA = twosess[twosess.sess == sessA]
alexB = twosess[twosess.sess == sessB]
return alexA, alexB, sessA, sessB
fig, axs = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=figsize)
alexA, alexB, sessA, sessB = split_sess(twosess)
offset=None
_, offset = draw_lines(axs[0], alexA, salus_events,
colors=plt.rcParams['axes.prop_cycle'].by_key()['color'],
labels=['Queuing', 'Prealloc', 'TPWait', 'DevCtx', 'PrepInput',
'Compute', 'ClrInput', 'PropOut', 'Misc'],
offset=offset, set_y=set_y)
_, offset = draw_lines(axs[1], alexB, salus_events,
colors=plt.rcParams['axes.prop_cycle'].by_key()['color'],
labels=['Queuing', 'Prealloc', 'TPWait', 'DevCtx', 'PrepInput',
'Compute', 'ClrInput', 'PropOut', 'Misc'],
offset=offset, set_y=set_y)
axs[0].set_title('alexnet_25 on Salus (Instance A: {})'.format(sessA))
axs[1].set_title('alexnet_25 on Salus (Instance B: {})'.format(sessB))
axs[1].set_xlabel('Normalized time (us)')
fig.tight_layout()
fig.savefig(os.path.join(outputdir, 'salusab.pdf'), dpi=300)
plt.close(fig)
#%% CDF of task length
def cdf(X, ax=None, **kws):
if ax is None:
_, ax = plt.subplots()
n = np.arange(1,len(X)+1) / np.float(len(X))
Xs = np.sort(X)
ax.step(Xs, n, **kws)
ax.set_ylim(0, 1)
return ax
import os
logdir = 'logs/osdi18/cc/exp18'
model = 'alexnet_25'
steptf = load_tf(os.path.join(logdir, 'tf/{}.tf.10iter.0.output'.format(model)))
stepsalus = load_salus(os.path.join(logdir, 'salus/1/perf.output'))
steptf = unify_names(steptf)
stepsalus = unify_names(stepsalus)
tflength = steptf[tf_events[-1]] - steptf[tf_events[0]]
saluslength = stepsalus[salus_events[-1]] - stepsalus[salus_events[0]]
tflength = tflength / | pd.Timedelta(microseconds=1) | pandas.Timedelta |
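# Hedged continuation sketch: overlay the two task-length CDFs with the cdf()
# helper defined above (converting saluslength the same way as tflength).
saluslength_us = saluslength / pd.Timedelta(microseconds=1)
ax = cdf(tflength, label='TensorFlow')
cdf(saluslength_us, ax=ax, label='Salus')
ax.set_xlabel('Task length (us)')
ax.legend()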
"""
Misc tools for implementing data structures
"""
import re
import collections
import numbers
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import pandas as pd
import pandas.algos as algos
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import (long, zip, map, string_types,
iteritems)
from pandas.types import api as gt
from pandas.types.api import * # noqa
from pandas.core.config import get_option
class PandasError(Exception):
pass
class PerformanceWarning(Warning):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
class UnsupportedFunctionCall(ValueError):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
return ("This method must be defined in the concrete class of %s" %
self.class_instance.__class__.__name__)
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
_DATELIKE_DTYPES = set([np.dtype(t)
for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
'm8[ns]', '<m8[ns]', '>m8[ns]']])
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
"""
return _isnull(obj)
def _isnull_new(obj):
if lib.isscalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (gt.ABCSeries, np.ndarray, pd.Index)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, gt.ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if lib.isscalar(obj):
return | lib.checknull_old(obj) | pandas.lib.checknull_old |
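# Quick illustration of the documented isnull() behaviour (standalone example,
# not part of the module internals above).
print(isnull(np.nan), isnull(None))                  # True True
print(list(isnull(pd.Series([1.0, np.nan, 3.0]))))   # [False, True, False]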
#j Import Dependencies
import requests
import pandas as pd
import matplotlib.pyplot as plt
import hvplot.pandas
import plotly.express as px
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
## Create a dataframe
### Import csv data and use Pandas to create a dataframe that consists ONLY of crypto being traded
data_path = open("./Resources/crypto_data.csv")
df = pd.read_csv(data_path, index_col=0)
active_crypto_df = df[df.IsTrading.eq(True)]
active_crypto_df.shape
### Alter dataframe: keep cryptos with a working algorithm; remove the IsTrading column; remove rows with null values; remove cryptos with no mined coins; remove N/As
active_crypto_df = active_crypto_df[pd.notna(active_crypto_df['Algorithm'])] # Keep only rows with a defined algorithm
active_crypto_df = active_crypto_df.drop(['IsTrading'], axis = 1) # Removing IsTrading column
active_crypto_df = active_crypto_df.dropna(how='any', axis=0) # Dropping rows with null values
active_crypto_df = active_crypto_df[active_crypto_df.TotalCoinsMined > 0] # Removing rows of crypto that have less than 0 mined coins
active_crypto_df = active_crypto_df[active_crypto_df!='N/A']
### Move CoinName column into its own DF and then drop the CoinName column from the Primary DF
Cryptocurrency_DF = active_crypto_df.filter(['CoinName'], axis=1)
active_crypto_df = active_crypto_df.drop(['CoinName'], axis=1)
### Create dummy values for text features, drop Algo and ProofType Column, and Standardize the data
foo_crypto = pd.get_dummies(active_crypto_df['Algorithm'])
foo_dummy = pd.get_dummies(active_crypto_df['ProofType'])
foo_combined = pd.concat([foo_crypto, foo_dummy], axis=1)
df = active_crypto_df.merge(foo_combined, left_index=True, right_index=True)
df = df.drop(['Algorithm', 'ProofType'], axis=1)
scale_df = StandardScaler().fit_transform(df)
## Clean data for PCA
### Reduce Dimensions for better Fitted Data
pca = PCA(n_components=3)
PCA_data = pca.fit_transform(scale_df)
PCA_df = | pd.DataFrame(data=PCA_data, columns=['PC1', 'PC2', 'PC3'], index=Cryptocurrency_DF.index) | pandas.DataFrame |
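## Cluster the reduced data
### Hedged sketch of a typical next step (elbow curve plus K-Means fit); k=4 is
### an assumption for illustration, not a result from this notebook.
inertia = [KMeans(n_clusters=k, random_state=0).fit(PCA_df).inertia_ for k in range(1, 11)]
plt.plot(range(1, 11), inertia, marker='o')
plt.xlabel('k'); plt.ylabel('inertia')
model = KMeans(n_clusters=4, random_state=0).fit(PCA_df)
PCA_df['Class'] = model.labels_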
import numpy as np
import pandas as pd
import joblib, os
class dataset_creator():
def __init__(self, project, data, njobs=1):
self.data = data
self.dates_ts = self.check_dates(data.index)
self.project_name= project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.areas = self.static_data['areas']
self.nwp_model = self.static_data['NWP_model']
self.njobs = njobs
if self.static_data['type'] == 'pv':
self.variables = ['Cloud', 'Flux', 'Temperature']
elif self.static_data['type'] == 'wind':
self.variables = ['WS', 'WD']
else:
self.variables = []
def check_dates(self, dates):
start_date = pd.to_datetime(dates[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(dates[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
return dates
def stack_2d(self, X, sample):
if len(sample.shape)==3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape)==2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def get_3d_dataset(self):
X = np.array([])
data_var = dict()
for var in self.variables:
if var in {'WS', 'Flux'}:
data_var[var+'_prev'] = X
data_var[var] = X
data_var[var+'_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
for t in self.dates_ts:
nwps = joblib.load(
os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle'))
pdates = pd.date_range(t + pd.DateOffset(hours=25), t + | pd.DateOffset(hours=48) | pandas.DateOffset |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scheduler.GOBI import GOBIScheduler
plt.style.use(['science'])
plt.rcParams["text.usetex"] = False
class Stats():
def __init__(self, Environment, WorkloadModel, Datacenter, Scheduler):
self.env = Environment
self.env.stats = self
self.workload = WorkloadModel
self.datacenter = Datacenter
self.scheduler = Scheduler
self.simulated_scheduler = GOBIScheduler('energy_latency_'+str(self.datacenter.num_hosts))
self.simulated_scheduler.env = self.env
self.initStats()
def initStats(self):
self.hostinfo = []
self.workloadinfo = []
self.activecontainerinfo = []
self.allcontainerinfo = []
self.metrics = []
self.schedulerinfo = []
def saveHostInfo(self):
hostinfo = dict()
hostinfo['interval'] = self.env.interval
hostinfo['cpu'] = [host.getCPU() for host in self.env.hostlist]
hostinfo['numcontainers'] = [len(self.env.getContainersOfHost(i)) for i,host in enumerate(self.env.hostlist)]
hostinfo['power'] = [host.getPower() for host in self.env.hostlist]
hostinfo['baseips'] = [host.getBaseIPS() for host in self.env.hostlist]
hostinfo['ipsavailable'] = [host.getIPSAvailable() for host in self.env.hostlist]
hostinfo['ipscap'] = [host.ipsCap for host in self.env.hostlist]
hostinfo['apparentips'] = [host.getApparentIPS() for host in self.env.hostlist]
hostinfo['ram'] = [host.getCurrentRAM() for host in self.env.hostlist]
hostinfo['ramavailable'] = [host.getRAMAvailable() for host in self.env.hostlist]
hostinfo['disk'] = [host.getCurrentDisk() for host in self.env.hostlist]
hostinfo['diskavailable'] = [host.getDiskAvailable() for host in self.env.hostlist]
self.hostinfo.append(hostinfo)
def saveWorkloadInfo(self, deployed, migrations):
workloadinfo = dict()
workloadinfo['interval'] = self.env.interval
workloadinfo['totalcontainers'] = len(self.workload.createdContainers)
if self.workloadinfo:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers'] - self.workloadinfo[-1]['totalcontainers']
else:
workloadinfo['newcontainers'] = workloadinfo['totalcontainers']
workloadinfo['deployed'] = len(deployed)
workloadinfo['migrations'] = len(migrations)
workloadinfo['inqueue'] = len(self.workload.getUndeployedContainers())
self.workloadinfo.append(workloadinfo)
def saveContainerInfo(self):
containerinfo = dict()
containerinfo['interval'] = self.env.interval
containerinfo['activecontainers'] = self.env.getNumActiveContainers()
containerinfo['ips'] = [(c.getBaseIPS() if c else 0) for c in self.env.containerlist]
containerinfo['apparentips'] = [(c.getApparentIPS() if c else 0) for c in self.env.containerlist]
containerinfo['ram'] = [(c.getRAM() if c else 0) for c in self.env.containerlist]
containerinfo['disk'] = [(c.getDisk() if c else 0) for c in self.env.containerlist]
containerinfo['creationids'] = [(c.creationID if c else -1) for c in self.env.containerlist]
containerinfo['hostalloc'] = [(c.getHostID() if c else -1) for c in self.env.containerlist]
containerinfo['active'] = [(c.active if c else False) for c in self.env.containerlist]
self.activecontainerinfo.append(containerinfo)
def saveAllContainerInfo(self):
containerinfo = dict()
allCreatedContainers = [self.env.getContainerByCID(cid) for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['interval'] = self.env.interval
if self.datacenter.__class__.__name__ == 'Datacenter':
containerinfo['application'] = [self.env.getContainerByCID(cid).application for cid in list(np.where(self.workload.deployedContainers)[0])]
containerinfo['ips'] = [(c.getBaseIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['create'] = [(c.createAt) for c in allCreatedContainers]
containerinfo['start'] = [(c.startAt) for c in allCreatedContainers]
containerinfo['destroy'] = [(c.destroyAt) for c in allCreatedContainers]
containerinfo['apparentips'] = [(c.getApparentIPS() if c.active else 0) for c in allCreatedContainers]
containerinfo['ram'] = [(c.getRAM() if c.active else 0) for c in allCreatedContainers]
containerinfo['disk'] = [(c.getDisk() if c.active else 0) for c in allCreatedContainers]
containerinfo['hostalloc'] = [(c.getHostID() if c.active else -1) for c in allCreatedContainers]
containerinfo['active'] = [(c.active) for c in allCreatedContainers]
self.allcontainerinfo.append(containerinfo)
def saveMetrics(self, destroyed, migrations):
metrics = dict()
metrics['interval'] = self.env.interval
metrics['numdestroyed'] = len(destroyed)
metrics['nummigrations'] = len(migrations)
metrics['energy'] = [host.getPower()*self.env.intervaltime for host in self.env.hostlist]
metrics['energytotalinterval'] = np.sum(metrics['energy'])
metrics['energypercontainerinterval'] = np.sum(metrics['energy'])/self.env.getNumActiveContainers()
metrics['responsetime'] = [c.totalExecTime + c.totalMigrationTime for c in destroyed]
metrics['avgresponsetime'] = np.average(metrics['responsetime']) if len(destroyed) > 0 else 0
metrics['migrationtime'] = [c.totalMigrationTime for c in destroyed]
metrics['avgmigrationtime'] = np.average(metrics['migrationtime']) if len(destroyed) > 0 else 0
        metrics['slaviolations'] = len(np.where([c.destroyAt > c.sla for c in destroyed])[0])
metrics['slaviolationspercentage'] = metrics['slaviolations'] * 100.0 / len(destroyed) if len(destroyed) > 0 else 0
metrics['waittime'] = [c.startAt - c.createAt for c in destroyed]
metrics['energytotalinterval_pred'], metrics['avgresponsetime_pred'] = self.runSimulationGOBI()
self.metrics.append(metrics)
def saveSchedulerInfo(self, selectedcontainers, decision, schedulingtime):
schedulerinfo = dict()
schedulerinfo['interval'] = self.env.interval
schedulerinfo['selection'] = selectedcontainers
schedulerinfo['decision'] = decision
schedulerinfo['schedule'] = [(c.id, c.getHostID()) if c else (None, None) for c in self.env.containerlist]
schedulerinfo['schedulingtime'] = schedulingtime
if self.datacenter.__class__.__name__ == 'Datacenter':
schedulerinfo['migrationTime'] = self.env.intervalAllocTimings[-1]
self.schedulerinfo.append(schedulerinfo)
def saveStats(self, deployed, migrations, destroyed, selectedcontainers, decision, schedulingtime):
self.saveHostInfo()
self.saveWorkloadInfo(deployed, migrations)
self.saveContainerInfo()
self.saveAllContainerInfo()
self.saveMetrics(destroyed, migrations)
self.saveSchedulerInfo(selectedcontainers, decision, schedulingtime)
def runSimpleSimulation(self, decision):
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
decision = self.simulated_scheduler.filter_placement(decision)
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
def runSimulationGOBI(self):
host_alloc = []; container_alloc = [-1] * len(self.env.hostlist)
for i in range(len(self.env.hostlist)):
host_alloc.append([])
for c in self.env.containerlist:
if c and c.getHostID() != -1:
host_alloc[c.getHostID()].append(c.id)
container_alloc[c.id] = c.getHostID()
selected = self.simulated_scheduler.selection()
decision = self.simulated_scheduler.filter_placement(self.simulated_scheduler.placement(selected))
for cid, hid in decision:
if self.env.getPlacementPossible(cid, hid) and container_alloc[cid] != -1:
host_alloc[container_alloc[cid]].remove(cid)
host_alloc[hid].append(cid)
energytotalinterval_pred = 0
for hid, cids in enumerate(host_alloc):
ips = 0
for cid in cids: ips += self.env.containerlist[cid].getApparentIPS()
energytotalinterval_pred += self.env.hostlist[hid].getPowerFromIPS(ips)
return energytotalinterval_pred*self.env.intervaltime, max(0, np.mean([metric_d['avgresponsetime'] for metric_d in self.metrics[-5:]]))
########################################################################################################
def generateGraphsWithInterval(self, dirname, listinfo, obj, metric, metric2=None):
fig, axes = plt.subplots(len(listinfo[0][metric]), 1, sharex=True, figsize=(4, 0.5*len(listinfo[0][metric])))
title = obj + '_' + metric + '_with_interval'
totalIntervals = len(listinfo)
x = list(range(totalIntervals))
metric_with_interval = []; metric2_with_interval = []
ylimit = 0; ylimit2 = 0
for hostID in range(len(listinfo[0][metric])):
metric_with_interval.append([listinfo[interval][metric][hostID] for interval in range(totalIntervals)])
ylimit = max(ylimit, max(metric_with_interval[-1]))
if metric2:
metric2_with_interval.append([listinfo[interval][metric2][hostID] for interval in range(totalIntervals)])
ylimit2 = max(ylimit2, max(metric2_with_interval[-1]))
for hostID in range(len(listinfo[0][metric])):
axes[hostID].set_ylim(0, max(ylimit, ylimit2))
axes[hostID].plot(x, metric_with_interval[hostID])
if metric2:
axes[hostID].plot(x, metric2_with_interval[hostID])
axes[hostID].set_ylabel(obj[0].capitalize()+" "+str(hostID))
axes[hostID].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + title + '.pdf')
def generateMetricsWithInterval(self, dirname):
fig, axes = plt.subplots(9, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.metrics)))
res = {}
for i,metric in enumerate(['numdestroyed', 'nummigrations', 'energytotalinterval', 'avgresponsetime',\
'avgmigrationtime', 'slaviolations', 'slaviolationspercentage', 'waittime', 'energypercontainerinterval']):
metric_with_interval = [self.metrics[i][metric] for i in range(len(self.metrics))] if metric != 'waittime' else \
[sum(self.metrics[i][metric]) for i in range(len(self.metrics))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric, fontsize=5)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
res[metric] = sum(metric_with_interval)
print("Summation ", metric, " = ", res[metric])
print('Average energy (sum energy interval / sum numdestroyed) = ', res['energytotalinterval']/res['numdestroyed'])
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Metrics' + '.pdf')
def generateWorkloadWithInterval(self, dirname):
fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 5))
x = list(range(len(self.workloadinfo)))
for i, metric in enumerate(['totalcontainers', 'newcontainers', 'deployed', 'migrations', 'inqueue']):
metric_with_interval = [self.workloadinfo[i][metric] for i in range(len(self.workloadinfo))]
axes[i].plot(x, metric_with_interval)
axes[i].set_ylabel(metric)
axes[i].grid(b=True, which='both', color='#eeeeee', linestyle='-')
plt.tight_layout(pad=0)
plt.savefig(dirname + '/' + 'Workload' + '.pdf')
########################################################################################################
def generateCompleteDataset(self, dirname, data, name):
title = name + '_with_interval'
metric_with_interval = []
headers = list(data[0].keys())
for datum in data:
metric_with_interval.append([datum[value] for value in datum.keys()])
df = pd.DataFrame(metric_with_interval, columns=headers)
df.to_csv(dirname + '/' + title + '.csv', index=False)
def generateDatasetWithInterval(self, dirname, metric, objfunc, metric2=None, objfunc2=None):
title = metric + '_' + (metric2 + '_' if metric2 else "") + (objfunc + '_' if objfunc else "") + (objfunc2 + '_' if objfunc2 else "") + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = [] # metric1 is of host and metric2 is of containers
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
if metric2:
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
if objfunc2:
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
df = pd.DataFrame(metric_with_interval)
if metric2: df = pd.concat([df, pd.DataFrame(metric2_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(host_alloc_with_interval)], axis=1)
df = pd.concat([df, pd.DataFrame(objfunc_with_interval)], axis=1)
if objfunc2: df = pd.concat([df, pd.DataFrame(objfunc2_with_interval)], axis=1)
df.to_csv(dirname + '/' + title + '.csv' , header=False, index=False)
def generateDatasetWithInterval2(self, dirname, metric, metric2, metric3, metric4, objfunc, objfunc2):
title = metric + '_' + metric2 + '_' + metric3 + '_' + metric4 + '_' +objfunc + '_' + objfunc2 + '_' + 'with_interval'
totalIntervals = len(self.hostinfo)
metric_with_interval = []; metric2_with_interval = []
metric3_with_interval = []; metric4_with_interval = []
host_alloc_with_interval = []; objfunc2_with_interval = []
objfunc_with_interval = []
for interval in range(totalIntervals-1):
metric_with_interval.append([self.hostinfo[interval][metric][hostID] for hostID in range(len(self.hostinfo[0][metric]))])
host_alloc_with_interval.append([self.activecontainerinfo[interval]['hostalloc'][cID] for cID in range(len(self.activecontainerinfo[0]['hostalloc']))])
objfunc_with_interval.append(self.metrics[interval+1][objfunc])
metric2_with_interval.append(self.activecontainerinfo[interval][metric2])
metric3_with_interval.append(self.metrics[interval][metric3])
metric4_with_interval.append(self.metrics[interval][metric4])
objfunc2_with_interval.append(self.metrics[interval+1][objfunc2])
        df = pd.DataFrame(metric_with_interval)
import spacy
import json
import numpy as np
import itertools
import multiprocessing as mp
import pandas
from collections import Counter
from keras.preprocessing.text import Tokenizer
from keras.utils import Sequence
from keras.preprocessing.sequence import pad_sequences
from utils import preprocess
from utils import parse_content_line
dataset_path = [
"dataset/computers/train/computers_train_xlarge.json",
"dataset/computers/test/computers_gs.json",
]
cores = mp.cpu_count()
preprocess_method = "nltk"
attrs = ["title"]
attributes = [attr + "_left" for attr in attrs] + [attr + "_right" for attr in attrs]
print("* LOADING DATASET")
dataset = []
for path in dataset_path:
with open(path, "r") as f:
for line in f:
dataset.append(parse_content_line(line, attributes, label=False))
dataset = np.concatenate(dataset, axis=0).astype(object)
print("* DONE")
sentences = list(itertools.chain(*dataset))
cores = mp.cpu_count() # Count the number of cores in a computer
print("* PREPROCESS")
# Preprocess text
if preprocess_method == "spacy":
# Load spacy for tokenizing text
nlp = spacy.load("en_core_web_sm", disable=["ner", "parser"])
txt = [
preprocess(doc, method=preprocess_method)
for doc in nlp.pipe(sentences, batch_size=5000, n_threads=cores)
]
elif preprocess_method == "nltk":
    with mp.Pool(processes=cores) as pool:
        txt = pool.map(preprocess, sentences)
print("* DONE")
# Remove duplicates
cleaned_sentences = pandas.DataFrame({"sentences": txt})
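# The deduplication step announced by the comment above is not shown here; a
# minimal continuation (an assumption -- variable names are hypothetical) could be:
#     unique_sentences = cleaned_sentences.drop_duplicates(subset="sentences")
#     txt = unique_sentences["sentences"].tolist()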
"""
Classes that represent a collection of points/structures that will define a labelmap or similar for image analysis purposes.
Currently the parent object is GeometryTopologyData, that can contain objects of type Point and/or BoundingBox.
The structure of the object is defined in the GeometryTopologyData.xsd schema.
Created on Apr 6, 2015
@author: <NAME>
"""
import xml.etree.ElementTree as et
import os
import platform
import time
import numpy as np
import warnings
class GeometryTopologyData(object):
# Coordinate System Constants
UNKNOWN = 0
IJK = 1
RAS = 2
LPS = 3
def __init__(self):
self.coordinate_system = self.UNKNOWN
self.lps_to_ijk_transformation_matrix = None # Transformation matrix to go from LPS to IJK (in the shape of a 4x4 list)
self.__lps_to_ijk_transformation_matrix_array__ = None # Same matrix in a numpy array
self.origin = None # Volume origin
self.spacing = None # Volume spacing
self.dimensions = None # Volume Dimensions
self.points = [] # List of Point objects
self.bounding_boxes = [] # List of BoundingBox objects
self.__seed_id__ = 1 # Seed. The structures added with "add_point", etc. will have an id = seed_id + 1
self.__print_separator__ = " " # Each level of the xml will be "tabulated" this number of spaces
@property
def seed_id(self):
return self.__seed_id__
@seed_id.setter
def seed_id(self, value):
warnings.warn("This property should not be set manually. Use with caution")
self.__seed_id__ = value
@property
def lps_to_ijk_transformation_matrix_array(self):
""" LPS_IJK transformation matrix in a numpy format
"""
if self.lps_to_ijk_transformation_matrix is None:
return None
if self.__lps_to_ijk_transformation_matrix_array__ is None:
            self.__lps_to_ijk_transformation_matrix_array__ = np.array(
                self.lps_to_ijk_transformation_matrix, dtype=float
            )
return self.__lps_to_ijk_transformation_matrix_array__
def __str__(self):
"""
Print a nicely formatted XML with the current content of the object
"""
return self.to_xml()
def add_point(self, point, fill_auto_fields=True, timestamp=None):
""" Add a new Point to the structure
:param point: Point object
:param fill_auto_fields: fill automatically UserName, MachineName, etc.
:param timestamp: optional timestamp to be set in the object
"""
self.points.append(point)
if fill_auto_fields:
self.fill_auto_fields(point)
if timestamp:
point.timestamp = timestamp
def add_bounding_box(self, bounding_box, fill_auto_fields=True, timestamp=None):
""" Add a new BoundingBox to the structure
:param bounding_box: BoundingBox object
:param fill_auto_fields: fill automatically UserName, MachineName, etc.
:param timestamp: optional timestamp to be set in the object
"""
self.bounding_boxes.append(bounding_box)
if fill_auto_fields:
self.fill_auto_fields(bounding_box)
if timestamp:
bounding_box.timestamp = timestamp
def fill_auto_fields(self, structure):
""" Fill "auto" fields like timestamp, username, etc, unless there is already a specified value
The id will be the current seed_id
@param structure: object whose fields will be filled
"""
if structure.__id__ == 0:
# Use the current seed to set the structure id
structure.__id__ = self.__seed_id__
# Update the seed
self.__seed_id__ += 1
if not structure.timestamp:
structure.timestamp = GeometryTopologyData.get_timestamp()
if not structure.user_name:
structure.user_name = os.path.split(os.path.expanduser('~'))[-1]
if not structure.machine_name:
structure.machine_name = platform.node()
def update_seed(self):
"""
Update the seed_id field to the maximum id found + 1
"""
id = 0
for p in self.points:
id = max(id, p.id)
for bb in self.bounding_boxes:
id = max(id, bb.id)
self.__seed_id__ = id + 1
@staticmethod
def get_timestamp():
""" Get a timestamp of the current date in the preferred format
@return:
"""
return time.strftime('%Y-%m-%d %H:%M:%S')
def to_xml(self):
"""
Generate the XML string representation of this object.
It doesn't use any special python module by default to keep compatibility with Slicer
Returns:
XML string representation of the object
"""
header = '<?xml version="1.0" encoding="UTF-8"?>\r\n'
output = header + "<GeometryTopologyData>\r\n"
output += ("{0}<CoordinateSystem>{1}</CoordinateSystem>\r\n".format(self.__print_separator__,
self.__coordinate_system_to_str__(self.coordinate_system)))
if self.lps_to_ijk_transformation_matrix is not None:
output += self.__write_transformation_matrix__(self.lps_to_ijk_transformation_matrix)
if self.spacing is not None:
output += "{0}<Spacing>\r\n{1}{0}</Spacing>\r\n".format(self.__print_separator__,
GeometryTopologyData.to_xml_vector(
self.spacing, separator=self.__print_separator__,
level=2)
)
if self.origin is not None:
output += "{0}<Origin>\r\n{1}{0}</Origin>\r\n".format(self.__print_separator__,
GeometryTopologyData.to_xml_vector(
self.origin, separator=self.__print_separator__,
level=2)
)
if self.dimensions is not None:
output += "{0}<Dimensions>\r\n{1}{0}</Dimensions>\r\n".format(self.__print_separator__,
GeometryTopologyData.to_xml_vector(
self.dimensions, separator=self.__print_separator__,
level=2)
)
# Concatenate points (sort first)
self.points.sort(key=lambda p: p.__id__)
points = "".join([i.to_xml() for i in self.points])
# Concatenate bounding boxes
bounding_boxes = "".join([i.to_xml() for i in self.bounding_boxes])
# Final result
s = output + points + bounding_boxes + "</GeometryTopologyData>\r\n"
return s
def to_xml_file(self, xml_file_path):
"""
Save this object to an xml file
Args:
xml_file_path: file path
"""
s = self.to_xml()
        with open(xml_file_path, "wb") as f:
            f.write(s.encode("utf-8"))
@staticmethod
def from_xml_file(xml_file_path):
""" Get a GeometryTopologyObject from a file
@param xml_file_path: file path
@return: GeometryTopologyData object
"""
with open(xml_file_path, 'r+b') as f:
xml = f.read()
return GeometryTopologyData.from_xml(xml)
@staticmethod
def from_xml(xml):
""" Build a GeometryTopologyData object from a xml string.
All the coordinates will be float.
remark: Use the ElementTree instead of lxml module to be compatible with Slicer
:param xml: xml string
:return: new GeometryTopologyData object
"""
root = et.fromstring(xml)
geometry_topology = GeometryTopologyData()
# NumDimensions. DEPRECATED
# node = root.find("NumDimensions")
# if node is not None:
# geometry_topology.__num_dimensions__ = int(node.text)
# Coordinate System
node = root.find("CoordinateSystem")
if node is not None:
geometry_topology.coordinate_system = geometry_topology.__coordinate_system_from_str__(node.text)
geometry_topology.lps_to_ijk_transformation_matrix = geometry_topology.__read_transformation_matrix__(root)
node = root.find("Spacing")
if node is not None:
val = []
for node_val in node.findall("value"):
val.append(float(node_val.text))
geometry_topology.spacing = np.array(val)
node = root.find("Origin")
if node is not None:
val = []
for node_val in node.findall("value"):
val.append(float(node_val.text))
geometry_topology.origin = np.array(val)
node = root.find("Dimensions")
if node is not None:
val = []
for node_val in node.findall("value"):
val.append(float(node_val.text))
geometry_topology.dimensions = np.array(val)
# Points
for xml_point_node in root.findall("Point"):
point = Point.from_xml_node(xml_point_node)
geometry_topology.add_point(point, fill_auto_fields=False)
# BoundingBoxes
for xml_bb_node in root.findall("BoundingBox"):
bb = BoundingBox.from_xml_node(xml_bb_node)
            geometry_topology.add_bounding_box(bb, fill_auto_fields=False)
# Set the new seed so that every point (or bounding box) added with "add_point" has a bigger id
geometry_topology.update_seed()
return geometry_topology
def get_hashtable(self):
"""
Return a "hashtable" that will be a dictionary of hash:structure for every point or
bounding box present in the structure
"""
hash = {}
for p in self.points:
hash[p.get_hash()] = p
for bb in self.bounding_boxes:
hash[bb.get_hash()] = bb
return hash
def convert_coordinates_to_array(self, type_=np.float32):
"""
Convert the coordinates of all the Points/Bounding_Boxes to numpy arrays of the specific type (default: float32)
Args:
type_: type for the conversion (default: float32)
"""
for p in self.points:
p.convert_to_array(type_)
for bb in self.bounding_boxes:
bb.convert_to_array(type_)
def coordinate_system_str(self):
"""
Return the coordinate system in text ("LPS", "RAS", "IJK", "UNKNOWN")
Returns: string
"""
if self.coordinate_system == self.IJK:
return "IJK"
if self.coordinate_system == self.RAS:
return "RAS"
if self.coordinate_system == self.LPS:
return "LPS"
return "UNKNOWN"
def export_to_dataframe(self):
"""
Export this instance info to a Pandas dataframe.
Note that this method requires some imports that will fail in 3D Slicer!
:return: Pandas dataframe with all the points/bounding boxes info
"""
import pandas as pd
from cip_python.common import ChestConventions
if len(self.points) > 0 and len(self.bounding_boxes) > 0:
raise NotImplementedError("This function can be used only for points or bounding boxes. This object contains both")
columns = ['chest_type_id', 'chest_type_name',
'chest_region_id', 'chest_region_name',
'feature_type_id', 'feature_type_name',
'description', 'timestamp', 'user_name', 'machine_name',
'coordinate_system', 'lps_to_ijk_transformation_matrix',
'spacing', 'origin', 'dimensions'
]
if len(self.points) > 0:
# Export points
columns = ['c1', 'c2', 'c3'] + columns
df = pd.DataFrame(columns=columns)
for s in self.points:
df.loc[s.id] = [s.coordinate[0], s.coordinate[1], s.coordinate[2],
s.chest_type, ChestConventions.GetChestTypeName(s.chest_type),
s.chest_region, ChestConventions.GetChestRegionName(s.chest_region),
s.feature_type, ChestConventions.GetImageFeatureName(s.feature_type),
s.description, s.timestamp, s.user_name, s.machine_name,
# Common properties
self.coordinate_system_str(), self.lps_to_ijk_transformation_matrix_array,
self.spacing, self.origin, self.dimensions]
        elif len(self.bounding_boxes) > 0:
# Export bounding boxes
columns = ['start1', 'start2', 'start3', 'size1', 'size2', 'size3'] + columns
            df = pd.DataFrame(columns=columns)
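# A minimal usage sketch for GeometryTopologyData (file names are hypothetical;
# the Point and BoundingBox classes referenced above are presumably defined later
# in this module and are not shown here):
#     gtd = GeometryTopologyData.from_xml_file("structures.xml")
#     gtd.convert_coordinates_to_array()
#     df = gtd.export_to_dataframe()
#     gtd.to_xml_file("structures_copy.xml")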
import numpy as np
import pandas as pd
from munch import Munch
from plaster.run.priors import ParamsAndPriors, Prior, Priors
from plaster.tools.aaseq.aaseq import aa_str_to_list
from plaster.tools.schema import check
from plaster.tools.schema.schema import Schema as s
from plaster.tools.utils import utils
from plaster.tools.c_common import c_common_tools
class SimV2Params(ParamsAndPriors):
channel__priors__columns = (
"ch_i",
"channel_name",
"bg_mu",
"bg_sigma",
"dye_name",
"gain_mu",
"gain_sigma",
"index",
"p_bleach",
"row_k_sigma",
)
dye__label__priors__columns = (
"channel_name",
"dye_name",
"aa",
"label_name",
"ptm_only",
"p_non_fluorescent",
"ch_i",
"bg_mu",
"bg_sigma",
"gain_mu",
"gain_sigma",
"row_k_sigma",
"p_bleach",
)
defaults = Munch(
n_pres=1,
n_mocks=0,
n_edmans=1,
dyes=[],
labels=[],
allow_edman_cterm=False,
enable_ptm_labels=False,
use_lognormal_model=False,
is_survey=False,
n_samples_train=5000,
n_samples_test=1000,
)
schema = s(
s.is_kws_r(
priors_desc=Priors.priors_desc_schema,
n_pres=s.is_int(bounds=(0, None)),
n_mocks=s.is_int(bounds=(0, None)),
n_edmans=s.is_int(bounds=(0, None)),
n_samples_train=s.is_int(bounds=(1, None)),
n_samples_test=s.is_int(bounds=(1, None)),
dyes=s.is_list(
elems=s.is_kws_r(dye_name=s.is_str(), channel_name=s.is_str(),)
),
labels=s.is_list(
elems=s.is_kws_r(
aa=s.is_str(),
dye_name=s.is_str(),
label_name=s.is_str(),
ptm_only=s.is_bool(required=False, noneable=True),
)
),
channels=s.is_dict(required=False),
allow_edman_cterm=s.is_bool(required=False, noneable=True),
enable_ptm_labels=s.is_bool(required=False, noneable=True),
use_lognormal_model=s.is_bool(),
is_survey=s.is_bool(),
)
)
def __init__(self, **kwargs):
# _skip_setup_dfs is True in fixture mode
super().__init__(source="SimV2Params", **kwargs)
self._setup_dfs()
def validate(self):
super().validate()
all_dye_names = list(set([d.dye_name for d in self.dyes]))
# No duplicate dye names
self._validate(
len(all_dye_names) == len(self.dyes), "The dye list contains a duplicate"
)
# No duplicate labels
self._validate(
len(list(set(utils.listi(self.labels, "aa")))) == len(self.labels),
"There is a duplicate label in the label_set",
)
# All labels have a legit dye name
[
self._validate(
label.dye_name in all_dye_names,
f"Label {label.label_name} does not have a valid matching dye_name",
)
for label in self.labels
]
# Channel mappings
mentioned_channels = {dye.channel_name: False for dye in self.dyes}
if "channels" in self:
# Validate that channel mapping is complete
for channel_name, ch_i in self.channels.items():
self._validate(
channel_name in mentioned_channels,
f"Channel name '{channel_name}' was not found in dyes",
)
mentioned_channels[channel_name] = True
self._validate(
all([mentioned for _, mentioned in mentioned_channels.items()]),
"Not all channels in dyes were enumerated in channels",
)
else:
# No channel mapping: assign them
self["channels"] = {
ch_name: i
for i, ch_name in enumerate(sorted(mentioned_channels.keys()))
}
@property
def n_cycles(self):
return self.n_pres + self.n_mocks + self.n_edmans
def channel_names(self):
return [
ch_name
for ch_name, _ in sorted(self.channels.items(), key=lambda item: item[1])
]
def ch_i_by_name(self):
return self.channels
@property
def n_channels(self):
# if self.is_photobleaching_run:
# return 1
return len(self.channels)
@property
def n_channels_and_cycles(self):
return self.n_channels, self.n_cycles
def _setup_dfs(self):
"""
Assemble all of the priors into several dataframes indexed differently.
(Call after validate)
* self.channel__priors:
ch_i,
ch_name,
bg_mu,
bg_sigma,
gain_mu,
gain_sigma,
row_k_sigma,
p_bleach
--> Note, does NOT have p_non_fluorescent because this is a dye property
* self.dye__label__priors:
aa,
label_name,
dye_name,
ch_i,
ch_name,
bg_mu,
bg_sigma,
gain_mu,
gain_sigma,
row_k_sigma,
p_bleach
p_non_fluorescent,
"""
# if self.is_photobleaching_run:
# # Not sure what these should be yet
# # self._ch_by_aa = {}
# # self._channel__priors = pd.DataFrame(columns=self.channel__priors__columns)
# # self._dye__label__priors = pd.DataFrame(columns=self.dye__label__priors__columns)
# self.dyes = [Munch(dye_name="zero", channel_name="zero")]
# self.channels = Munch(zero=0)
# self.labels = [
# dict(aa=".", dye_name="zero", label_name="zero", ptm_only=False)
# ]
labels_df = pd.DataFrame(self.labels)
# labels_df: (aa, dye_name, label_name, ptm_only)
# assert len(labels_df) > 0
        dyes_df = pd.DataFrame(self.dyes)
"""Utilities for read counting operations.
"""
import warnings
from collections import defaultdict
from collections import Counter
from collections import OrderedDict
from functools import reduce
import os
import subprocess
import sys
import numpy as np
import pandas as pd
import pybedtools
import pysam
import six
import h5py
from tqdm import tqdm
from .genome import _get_sizes
from .genome import _get_bed
from .genome import __GENOMES_DB__
from .helpers import merge_intervals
from .helpers import mkdir_p
from .helpers import complementary_strand
from .wig import WigReader
from .interval import Interval
from .parallel import ParallelExecutor
from joblib import delayed
from .infer_protocol import infer_protocol
from .helpers import read_bed_as_intervaltree
from .helpers import is_read_uniq_mapping, create_bam_index
from . import __version__
class OrderedCounter(Counter, OrderedDict):
"Counter that remembers the order elements are first encountered"
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
def _get_gene_strand(refseq, chrom, mapped_start, mapped_end):
"""Lookup a particular location on chromosome to determine
its strand
Parameters
----------
refseq: string or intervaltree
        refseq genes.bed file path, or the same file already loaded as an intervaltree
    chrom: string
        chromosome name
    mapped_start: int
        start position of the mapped read
    mapped_end: int
        end position of the mapped read
"""
if isinstance(refseq, six.string_types):
refseq = read_bed_as_intervaltree(refseq)
gene_strand = list(set(refseq[chrom].find(mapped_start, mapped_end)))
if len(gene_strand) > 1:
return "ambiguous"
if len(gene_strand) == 0:
# Try searching upstream 30 and downstream 30 nt
gene_strand = list(set(refseq[chrom].find(mapped_start - 30, mapped_end - 30)))
if len(gene_strand) == 0:
# Downstream
gene_strand = list(
set(refseq[chrom].find(mapped_start + 30, mapped_end + 30))
)
if len(gene_strand) == 0:
return "not_found"
if len(gene_strand) > 1:
return "ambiguous"
return gene_strand[0]
if len(gene_strand) > 1:
return "ambiguous"
return gene_strand[0]
return gene_strand[0]
def gene_coverage(gene_group, bw, offset_5p=0, offset_3p=0):
"""Get gene coverage.
Parameters
----------
gene_group: DataFrame
gene group from bed file
bw: str
Path to bigwig to fetch the scores from
offset_5p: int (positive)
Number of bases to count upstream (5')
offset_3p: int (positive)
Number of bases to count downstream (3')
Returns
-------
coverage_combined: series
Series with index as position and value as coverage
gene_offset_5p: Gene wise 5 prime offset
This might be different from `offset_5p` in cases where
`offset_5p` leads to a negative coordinate
gene_offset_3p: Gene wise 3 prime offset
This might be different from `offset_3p` in cases where
`offset_3p` leads to position beyond chromsome length
"""
if offset_5p < 0 or offset_3p < 0:
raise RuntimeError("Offsets must be non-negative")
sys.exit(1)
if not isinstance(bw, WigReader):
bw = WigReader(bw)
chromosome_lengths = bw.chromosomes
if len(gene_group["strand"].unique()) != 1:
raise RuntimeError("Multiple strands?: {}".format(gene_group))
if len(gene_group["chrom"].unique()) != 1:
raise RuntimeError("Chrom not unique for: {}".format(gene_group))
strand = gene_group["strand"].unique()[0]
intervals = list(
zip(
gene_group["chrom"],
gene_group["start"],
gene_group["end"],
gene_group["strand"],
)
)
intervals = [Interval(i[0], i[1], i[2], i[3]) for i in intervals]
intervals_combined, gene_offset_5p, gene_offset_3p = merge_intervals(
intervals, chromosome_lengths, offset_5p, offset_3p
)
coverages = bw.query(intervals_combined)
if len(coverages) == 0:
        return (pd.Series([], dtype=float), 0, 0)
coverages_combined = []
for cov in coverages:
coverages_combined += list(cov)
# if it is located on negative strand
# reverse the values since we no longer
# want to track the individual position
# but just an indexed version
if strand == "-":
coverages_combined.reverse()
coverages_combined = np.array(coverages_combined).flatten()
coverages_combined = pd.Series(
coverages_combined,
index=np.arange(-gene_offset_5p, len(coverages_combined) - gene_offset_5p),
)
return (coverages_combined, gene_offset_5p, gene_offset_3p)
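# Rough usage sketch (mirrors how export_gene_coverages below calls this function;
# the BED and bigWig paths are illustrative):
#     bed_df = pybedtools.BedTool("cds.bed").sort().to_dataframe()
#     for gene_name, gene_group in bed_df.groupby("name"):
#         cov, off5p, off3p = gene_coverage(gene_group, "sample.bw", offset_5p=60)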
def export_gene_coverages(bed, bw, saveto, offset_5p=0, offset_3p=0):
"""Export all gene coverages.
Parameters
----------
bed: str
Path to CDS or 5'UTR or 3'UTR bed
bw: str
Path to bigwig to fetch the scores from
saveto: str
Path to write output tsv file
offset_5p: int (positive)
Number of bases to count upstream (5')
offset_3p: int (positive)
Number of bases to count downstream (3')
Returns
-------
gene_profiles: file
with the following format:
gene1\t5poffset1\t3poffset1\tcnt1_1 cnt1_2 cnt1_3 ...\n
gene2\t5poffset2\t3poffset2\tcnt2_1 cnt2_2 cnt2_3 ...\n
"""
if bed.lower().split("_")[0] in __GENOMES_DB__:
splitted = bed.lower().split("_")
if len(splitted) == 2:
genome, region_type = splitted
elif len(splitted) == 3:
genome = splitted[0]
region_type = ("_").join(splitted[1:])
bed = _get_bed(region_type, genome)
bed_df = pybedtools.BedTool(bed).sort().to_dataframe()
bed_df["chrom"] = bed_df["chrom"].astype(str)
bed_df["name"] = bed_df["name"].astype(str)
bed_grouped = bed_df.groupby("name")
if not isinstance(bw, WigReader):
bw = WigReader(bw)
to_write = "gene_name\toffset_5p\toffset_3p\tcoverage\ttotal\n"
for gene_name, gene_group in tqdm(bed_grouped):
coverage, gene_offset_5p, gene_offset_3p = gene_coverage(
gene_group, bw, offset_5p, offset_3p
)
coverage = coverage.fillna(0)
coverage = coverage.astype(int)
coverage = coverage.tolist()
to_write += "{}\t{}\t{}\t{}\t{}\n".format(
gene_name,
int(gene_offset_5p),
int(gene_offset_3p),
coverage,
np.sum(coverage),
)
mkdir_p(os.path.dirname(saveto))
with open(saveto, "w") as outfile:
outfile.write(to_write)
def _multiprocess_gene_coverage(data):
"""Process gene_c overage given a bigwig and a genegroup.
WigReader is not pickleable when passed as an argument so we use strings
as input for the bigwig
Parameters
----------
data: tuple
gene_gorup, bigwig, offset_5p, offset_3p, max_positions, orientation
Returns
-------
norm_cov: Series
normalized coverage
"""
gene_group, bw, offset_5p, offset_3p, max_positions, orientation = data
bw = WigReader(bw)
coverage, gene_offset_5p, gene_offset_3p = gene_coverage(
gene_group, bw, offset_5p, offset_3p
)
coverage = coverage.fillna(0)
if orientation == "5prime":
if max_positions is not None and len(coverage.index) > 0:
# min_index will correspond to the gene_offset_5p in general
min_index = min(coverage.index.tolist())
max_index = max(coverage.index.tolist())
assert (
min_index == -gene_offset_5p
), "min_index and gene_offset_5p are not same| min_index: {} | gene_offset_5p: {}".format(
min_index, -gene_offset_5p
)
coverage = coverage[np.arange(min_index, min(max_index, max_positions))]
elif orientation == "3prime":
# We now want to be tracking things from the end position
# we can do this since gene_coverage() takes care of the strand
# so a 3prime is always the tail of the array
# note that if gene_offset_5p >0, in this case, it is almost never used
# since we restrict ourselves to max_positions, which itself is almost
# always < 1000
if max_positions is not None and len(coverage.index) > 0:
max_index = max(coverage.index.tolist())
min_index = min(coverage.index.tolist())
assert (
min_index == -gene_offset_5p
), "min_index and gene_offset_5p are not same| min_index: {} | gene_offset_5p: {}".format(
min_index, -gene_offset_5p
)
# max_index is the maximum we can go to the right
# our stop codon will be located gene_offset_3p upstream of this index
# Let's reindex our series so that we set
coverage = coverage.reindex(np.arange(-max_index, -min_index, 1))
coverage = coverage[np.arange(-max_positions, gene_offset_3p)]
else:
raise ValueError("{} orientation not supported".format(orientation))
assert (
coverage is not None
), "coverage is none | max_index={} | min_index={}| gene_offset_3p={} | gene_offset_5p={}".format(
max_index, min_index, gene_offset_3p, gene_offset_5p
)
coverage_mean = coverage.mean()
norm_cov = coverage / coverage_mean
norm_cov = norm_cov.fillna(0)
bw.close()
return norm_cov
def export_metagene_coverage(
bed,
bw,
max_positions=None,
saveto=None,
offset_5p=0,
offset_3p=0,
orientation="5prime",
n_jobs=16,
):
"""Export metagene coverage.
Parameters
----------
bed: str
Path to CDS or 5'UTR or 3'UTR bed
bw: str
Path to bigwig to fetch the scores from
max_positions: int
Number of positions to consider while
calculating the normalized coverage
Higher values lead to slower implementation
saveto: str
Path to write output tsv file
offset_5p: int (positive)
Number of bases to count upstream (5')
offset_3p: int (positive)
Number of bases to count downstream (3')
orientation: string
['5prime', '3prime'] indicating the end of read
being tracked
n_jobs: int
Number of paralle threads to open
Better to do on a multi-cpu machine, but also works decently on
a single core machine
Returns
-------
metagene_coverage: series
Metagene coverage
"""
if max_positions is None:
warnings.warn(
"The max_positions is not specified, it could"
"take long time to calculate metagene coverage"
)
if max_positions is not None and max_positions <= 0:
raise RuntimeError("The max_positions must be positive")
sys.exit(1)
if bed.lower().split("_")[0] in __GENOMES_DB__:
splitted = bed.lower().split("_")
if len(splitted) == 2:
genome, region_type = splitted
elif len(splitted) == 3:
genome = splitted[0]
region_type = ("_").join(splitted[1:])
bed = _get_bed(region_type, genome)
bed_df = pybedtools.BedTool(bed).sort().to_dataframe()
bed_df["chrom"] = bed_df["chrom"].astype(str)
bed_df["name"] = bed_df["name"].astype(str)
bed_grouped = bed_df.groupby("name")
if not isinstance(bw, WigReader):
bw = WigReader(bw)
position_counter = Counter()
    metagene_coverage = pd.Series(dtype=float)
data = [
(gene_group, bw.wig_location, offset_5p, offset_3p, max_positions, orientation)
for gene_name, gene_group in bed_grouped
]
aprun = ParallelExecutor(n_jobs=n_jobs)
total = len(bed_grouped.groups)
all_coverages = aprun(total=total)(
delayed(_multiprocess_gene_coverage)(d) for d in data
)
for norm_cov in all_coverages:
metagene_coverage = metagene_coverage.add(norm_cov, fill_value=0)
position_counter += Counter(norm_cov.index.tolist())
if len(position_counter) != len(metagene_coverage):
raise RuntimeError("Gene normalized counter mismatch")
sys.exit(1)
position_counter = pd.Series(position_counter)
metagene_coverage = metagene_coverage.div(position_counter)
if len(metagene_coverage.index) == 0:
# If nothing is found in the bigwig, return zeros
metagene_coverage = pd.Series(
[0] * (max_positions + offset_5p),
index=np.arange(-offset_5p, max_positions),
)
if saveto:
mkdir_p(os.path.dirname(saveto))
to_write = pd.DataFrame(
{"position": metagene_coverage.index, "count": metagene_coverage.values}
)
to_write = to_write[["position", "count"]]
to_write.to_csv(saveto, sep=str("\t"), index=False)
return metagene_coverage
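# Illustrative call (the "<genome>_<region>" shortcut follows the lookup logic at
# the top of this function; the identifier and paths are not verified here):
#     metagene = export_metagene_coverage("hg38_cds", "sample.bw", max_positions=500,
#                                         saveto="metagene.tsv", offset_5p=60, n_jobs=4)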
def export_read_counts(gene_coverages, saveto, keep_offsets=True):
"""export read counts from gene coverages file.
Parameters
----------
gene_coverages: string
Path to gene coverages.tsv
saveto: str
Path to save output tsv
gene_name\tcount\tlength
keep_offsets: bool
whether to keep the 5' and 3' offsets in gene coverages
        default is True
"""
if ".gz" in gene_coverages:
gene_coverages_df = pd.read_table(gene_coverages, compression="gzip")
else:
gene_coverages_df = pd.read_table(gene_coverages)
gene_coverages_zipped = list(
zip(
gene_coverages_df["gene_name"],
gene_coverages_df["offset_5p"],
gene_coverages_df["offset_3p"],
gene_coverages_df["coverage"],
)
)
to_write = "gene_name\tcount\tlength\n"
for gene_name, offset_5p, offset_3p, cov in gene_coverages_zipped:
coverage = eval(cov)
coverage = pd.Series(
np.array(coverage), index=np.arange(-offset_5p, len(coverage) - offset_5p)
)
coverage = coverage.fillna(0)
max_index = max(coverage.index.tolist())
if not keep_offsets:
coverage = coverage[np.arange(0, max_index - offset_3p)]
count = coverage.sum()
length = len(coverage.tolist())
to_write += "{}\t{}\t{}\n".format(gene_name, int(count), int(length))
mkdir_p(os.path.dirname(saveto))
with open(saveto, "w") as output:
output.write(to_write)
def merge_gene_coverages(gene_coverages, max_positions=None, saveto=None):
"""merge gene coverages to generate metagene coverage.
Parameters
----------
gene_coverages: string
Path to gene coverages.tsv
max_positions: int
Number of positions to consider while
calculating the normalized coverage
saveto: str
Path to save output tsv
Returns
-------
metagene_coverage : Series
Metagene coverage
"""
if ".gz" in gene_coverages:
        gene_coverages_df = pd.read_table(gene_coverages, compression="gzip")
"""
WSP Cleaning:
Takes a csv file from WSP's collision analysis tool and returns a new csv file
in a format that can be merged with Weather Underground data, ultimately being
used in a visualization tool
"""
import numpy as np
import pandas as pd
from IPython.core.interactiveshell import InteractiveShell
from pyproj import Proj, transform
InteractiveShell.ast_node_interactivity = "all"
def convert_stateplane_to_latlon(state_x, state_y, proj_in=2286,
proj_out=4326):
"""
This funtion takes the state plane coordinates used by the state patrol
and converts them to latitudes and longitudes to be plotted on a map
:param state_x: float
x state plane coordinate (corresponding with longitude)
:param state_y: float
y state plane coordinate (corresponding with latitude)
:proj_in: int
value to convert state plane coordinate to lat/lon
:proj_out: int
value to convert state plane coordinate to lat/lon
"""
inProj = Proj(init='epsg:' + str(proj_in), preserve_units=True)
outProj = Proj(init='epsg:' + str(proj_out))
lon, lat = transform(inProj, outProj, state_x, state_y)
return lat, lon
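# Example call (coordinate values are placeholders in state-plane feet; the
# defaults convert from EPSG:2286 to EPSG:4326 per the signature above):
#     lat, lon = convert_stateplane_to_latlon(1155000.0, 630000.0)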
def column_conversion(input_data, old_column, dictionary, record):
"""
Converts values in columns to descriptions using dictionaries provided by
WSP collision analysis tool
:param input_data: dataframe
the dataframe to be read and have column changed
:param old_column: string
reads old column value
:param dictionary: dictionary
reads the appropriate dictionary to assign new value
:param record: int (loop parameter)
the record number being changed (processed in loop)
"""
if not np.isnan(input_data[old_column][record]):
new_value = dictionary[input_data[old_column][record]]
return new_value
else:
pass
def clean_wsp_collision_data(input_csv_filepath):
"""
Takes raw input csv downloaded from WSP's collision analysis tool and
converts it into a cleaned dataframe
:param input csv: string
filepath location of file to be cleaned
"""
# read in raw data from WSP's collision analysis tool
print('\nreading csv file...')
df = pd.read_csv(input_csv_filepath, sep=',', low_memory=False)
# drop any collision records with no state plane coordinates
print('dropping records with no state plane coordinates...')
df = df.drop(df[np.isnan(df.Colli_Dtl_Info_State_Plane_X)].index)
df = df.drop(df[np.isnan(df.Colli_Dtl_Info_State_Plane_Y)].index)
df = df.reset_index(drop=True)
# convert state plane coordinates to latitudes and longitudes
print('converting state plane coordinates/'
'dropping records not in range...')
x = np.array(df.Colli_Dtl_Info_State_Plane_X)
y = np.array(df.Colli_Dtl_Info_State_Plane_Y)
x_new, y_new = convert_stateplane_to_latlon(x, y)
df = df.rename(columns={'Colli_Dtl_Info_State_Plane_X': 'lat',
'Colli_Dtl_Info_State_Plane_Y': 'lon'})
    df['lat'] = pd.DataFrame(x_new)
"""Scraper for https://projects.fivethirtyeight.com/soccer-predictions."""
import itertools
import json
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import pandas as pd
from ._common import BaseRequestsReader, make_game_id, standardize_colnames
from ._config import DATA_DIR, NOCACHE, NOSTORE, TEAMNAME_REPLACEMENTS
FIVETHIRTYEIGHT_DATA_DIR = DATA_DIR / "FiveThirtyEight"
FIVETHIRTYEIGHT_API = "https://projects.fivethirtyeight.com/soccer-predictions"
class FiveThirtyEight(BaseRequestsReader):
"""Provides pd.DataFrames from fivethirtyeight's "Club Soccer Predictions" project.
Data will be downloaded as necessary and cached locally in
``~/soccerdata/data/FiveThirtyEight``.
Original project and background info:
https://projects.fivethirtyeight.com/soccer-predictions/ and
https://fivethirtyeight.com/features/how-our-club-soccer-projections-work/
Parameters
----------
leagues : string or iterable, optional
IDs of Leagues to include.
seasons : string, int or list, optional
Seasons to include. Supports multiple formats.
Examples: '16-17'; 2016; '2016-17'; [14, 15, 16]
proxy : 'tor' or or dict or list(dict) or callable, optional
Use a proxy to hide your IP address. Valid options are:
- "tor": Uses the Tor network. Tor should be running in
the background on port 9050.
- dict: A dictionary with the proxy to use. The dict should be
a mapping of supported protocols to proxy addresses. For example::
{
'http': 'http://10.10.1.10:3128',
'https': 'http://10.10.1.10:1080',
}
- list(dict): A list of proxies to choose from. A different proxy will
be selected from this list after failed requests, allowing rotating
proxies.
- callable: A function that returns a valid proxy. This function will
be called after failed requests, allowing rotating proxies.
no_cache : bool
If True, will not use cached data.
no_store : bool
If True, will not store downloaded data.
data_dir : Path
Path to directory where data will be cached.
"""
def __init__(
self,
leagues: Optional[Union[str, List[str]]] = None,
seasons: Optional[Union[str, int, List]] = None,
proxy: Optional[
Union[str, Dict[str, str], List[Dict[str, str]], Callable[[], Dict[str, str]]]
] = None,
no_cache: bool = NOCACHE,
no_store: bool = NOSTORE,
data_dir: Path = FIVETHIRTYEIGHT_DATA_DIR,
):
"""Initialize a new FiveThirtyEight reader."""
super().__init__(
leagues=leagues, proxy=proxy, no_cache=no_cache, no_store=no_store, data_dir=data_dir
)
self.seasons = seasons # type: ignore
self._data = {}
url = f"{FIVETHIRTYEIGHT_API}/data.json"
filepath = self.data_dir / "latest.json"
reader = self.get(url, filepath)
for k, v in json.load(reader).items():
self._data[k] = v
def read_leagues(self) -> pd.DataFrame:
"""Retrieve the selected leagues from the datasource.
Returns
-------
pd.DataFrame
"""
df = (
pd.DataFrame.from_dict(self._data["leagues"])
.rename(columns={"slug": "league", "id": "league_id"})
.pipe(self._translate_league)
.pipe(standardize_colnames)
.drop(columns=["overview_column", "custom_template", "skip_cols"])
.set_index("league")
.loc[self._selected_leagues.keys()]
.sort_index()
)
return df
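    # Rough usage sketch (the league/season identifiers below are illustrative
    # and not validated here):
    #     fte = FiveThirtyEight(leagues="ENG-Premier League", seasons="21-22")
    #     leagues = fte.read_leagues()
    #     games = fte.read_games()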
def read_games(self) -> pd.DataFrame:
"""Retrieve all games for the selected leagues.
Returns
-------
pd.DataFrame
"""
col_rename = {
"adj_score1": "adj_score_home",
"adj_score2": "adj_score_away",
"chances1": "chances_home",
"chances2": "chances_away",
"datetime": "date",
"moves1": "moves_home",
"moves2": "moves_away",
"prob1": "prob_home",
"prob2": "prob_away",
"probtie": "prob_tie",
"score1": "score_home",
"score2": "score_away",
"team1": "home_team",
"team1_code": "home_code",
"team1_id": "home_id",
"team1_sdr_id": "home_sdr_id",
"team2": "away_team",
"team2_code": "away_code",
"team2_id": "away_id",
"team2_sdr_id": "away_sdr_id",
}
filemask = "matches_{}_{}.csv"
urlmask = FIVETHIRTYEIGHT_API + "/forecasts/20{}_{}_matches.json"
data = []
for lkey, skey in itertools.product(self._selected_leagues.values(), self.seasons):
filepath = self.data_dir / filemask.format(lkey, skey)
url = urlmask.format(skey[:2], lkey)
reader = self.get(url, filepath)
data.extend([{"league": lkey, "season": skey, **d} for d in json.load(reader)])
        df = pd.DataFrame.from_dict(data)
import pandas as pd
import os
from datetime import date
import logging
from tqdm import tqdm
# logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", datefmt='%Y-%m-%d %H:%M:%S')
def getTrialStats(today:date):
os.chdir(os.path.realpath('../'))
diseasesDf = pd.read_csv(f'nciMainDiseases{today}.csv')
sitesToTrialsDf = pd.read_csv(f'nciSites{today}.csv')
# sitesDiseaseCountDf = pd.DataFrame(columns=['orgName', 'nciThesaurusConceptId', 'disease', 'count'])
sitesDiseaseCount = []
for row in tqdm(sitesToTrialsDf[['orgName', 'nciId']].itertuples(), total=len(sitesToTrialsDf.index)):
#removing the Other Cancer, Other Neoplasm, and Other Disease entries
relevantDiseasesDf = diseasesDf[(diseasesDf['nciId'] == row.nciId) & (~diseasesDf['nciThesaurusConceptId'].isin(['C2991', 'C3262', 'C2916']))]
relevantDiseasesDf = relevantDiseasesDf[relevantDiseasesDf['isLeadDisease'] == 1]
for id in relevantDiseasesDf['nciThesaurusConceptId'].unique():
rowDict = {
'orgName': row.orgName,
'nciThesaurusConceptId': id,
'disease': relevantDiseasesDf[relevantDiseasesDf['nciThesaurusConceptId'] == id]['name'].iloc[0],
# 'count': len(relevantDiseasesDf[relevantDiseasesDf['nciThesaurusConceptId'] == id].index)
}
sitesDiseaseCount.append(rowDict)
    sitesDiseaseCountDf = pd.DataFrame.from_records(sitesDiseaseCount)
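    # The rest of the original function is not shown; a plausible ending (an
    # assumption, output file name hypothetical) would persist or return the table:
    #     sitesDiseaseCountDf.to_csv(f'nciSiteDiseaseCount{today}.csv', index=False)
    #     return sitesDiseaseCountDf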
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
    tm.assert_series_equal(result, exp)
import requests,json,os,re,argparse
import pandas as pd
from time import sleep
parser=argparse.ArgumentParser()
parser.add_argument('-i','--input_file', required=True, help='Input csv file with user name and orcid id')
parser.add_argument('-o','--output_xml', required=True, help='Output xml file')
args=parser.parse_args()
input_file=args.input_file
output_xml=args.output_xml
def get_pmc_data(orcid_id,cursor=''):
'''
A method for fetching pmc data
:param orcid_id: An orcid id
:param cursor: A cursor string, default empty string
'''
try:
data=list()
url_str='https://www.ebi.ac.uk/europepmc/webservices/rest/search?query=AUTHORID:{0}&format=json&sort_date:y%20BDESC&cursorMark={1}'.format(orcid_id,cursor)
response=requests.get(url_str)
if response.ok:
json_data=json.loads(response.content.decode('utf-8'))
data=json_data['resultList']['result']
#print(json_data)
if 'nextCursorMark' in json_data:
if cursor !=json_data['nextCursorMark']:
cursor=json_data['nextCursorMark']
else:
cursor=''
return data,cursor
except:
raise
def add_pmc_link(series):
'''
A method for adding pubmed link to the data table
:param series: A data series with 'pmid' (pubmed id)
'''
try:
pmid=series['pmid']
series['link']='https://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(pmid)
return series
except:
raise
def get_pmc_data_for_user(user,orcid_id):
'''
A method for fetching all publication info for a user
:param user: A user name
:param orcid_id: An orcid id for PMC lookup
:returns: A dataframe containing list of publications
'''
try:
all_data=list()
cursor=''
while True:
data,cursor=get_pmc_data(orcid_id=orcid_id,cursor=cursor)
if len(data)>0 or cursor !='':
all_data.extend(data)
sleep(10)
else:
break
all_data=pd.DataFrame(all_data)
all_data['user']=user
all_data=all_data.apply(lambda x: add_pmc_link(series=x),
axis=1)
return all_data
except:
raise
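# Example call (the ORCID iD below is a placeholder; the available columns depend
# on the Europe PMC response payload, plus the 'user' and 'link' columns added above):
#     pubs = get_pmc_data_for_user(user="Jane Doe", orcid_id="0000-0002-1825-0097")
#     print(pubs[["user", "pmid", "link"]].head())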
def get_publication_list(input_file):
'''
A method for fetching publication list and writing it to an output csv file
:param input_file: An input csv file containing 'name' and 'orcid' column
returns: A pandas dataframe containing publication info of all the users
'''
try:
        final_data = pd.DataFrame()
import argparse
import collections
import pandas
import numpy as np
import os
import gym
from keras.layers import Activation, Dense, Flatten
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
from rl.agents import SARSAAgent
from rl.core import Processor
from rl.policy import BoltzmannQPolicy
from noise_estimator import *
from utils import *
parser = argparse.ArgumentParser()
parser.add_argument('--error_positive', type=float, default=0.2,
help='Error positive rate [default: 0.2]')
parser.add_argument('--error_negative', type=float, default=0.0,
help='Error negative rate [default: 0.0]')
parser.add_argument('--log_dir', default='logs',
help='Log dir [default: logs]')
parser.add_argument('--reward', default='normal',
help='reward choice: normal/noisy/surrogate [default: normal]')
parser.add_argument('--smooth', type=str2bool, default=False,
help='Add smoothing to rewards [default: False]')
FLAGS = parser.parse_args()
ERR_P = FLAGS.error_positive
ERR_N = FLAGS.error_negative
REWARD = FLAGS.reward
SMOOTH = FLAGS.smooth
if REWARD == "normal":
LOG_DIR = os.path.join(FLAGS.log_dir, "sarsa_cartpole")
else:
LOG_DIR = os.path.join(os.path.join(FLAGS.log_dir, "sarsa_cartpole"), str(ERR_P))
ENV_NAME = 'CartPole-v0'
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
os.system('cp sarsa_cartpole.py %s' % (LOG_DIR)) # bkp of train procedure
print ('cp sarsa_cartpole.py %s' % (LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'setting.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def build_state(features):
return int("".join(map(lambda feature: str(int(feature)), features)))
def to_bin(value, bins):
return np.digitize(x=[value], bins=bins)[0]
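# These helpers discretise continuous observations (they are not used further in
# the training code shown below); illustrative behaviour:
#     build_state([3, 1, 4, 1])                      # -> 3141
#     to_bin(0.5, bins=np.linspace(-2.4, 2.4, 10))   # -> index of the bin containing 0.5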
def train():
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from keras import backend as K
K.set_session(sess)
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# SARSA does not require a memory.
policy = BoltzmannQPolicy()
# processor_noisy = CartpoleSurrogateProcessor(e_= ERR_N, e=ERR_P, surrogate=False)
# processor_surrogate = CartpoleSurrogateProcessor(e_= ERR_N, e=ERR_P, surrogate=True)
if not SMOOTH:
processor_noisy = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=False, surrogate=False)
processor_surrogate = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=False, surrogate=True)
else:
processor_noisy = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=True, surrogate=False)
processor_surrogate = CartpoleProcessor(e_= ERR_N, e=ERR_P, smooth=True, surrogate=True)
if REWARD == "normal":
sarsa_normal = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10,
policy=policy)
sarsa_normal.compile(Adam(lr=1e-3), metrics=['mae'])
history_normal = sarsa_normal.fit(env, nb_steps=50000, visualize=False, verbose=2)
sarsa_normal.save_weights(os.path.join(LOG_DIR, 'sarsa_normal_{}_weights.h5f'.format(ENV_NAME)), overwrite=True)
sarsa_normal.test(env, nb_episodes=10, visualize=False, verbose=2)
        pandas.DataFrame(history_normal.history)
#!/usr/bin/env python
import pandas as pd
pd.set_option('display.max_rows', 15500)
pd.set_option('display.max_columns', 55500)
pd.set_option('display.width', 551000)
data = pd.read_csv("crimemar14.csv")
data.head()
newdata = data["Datetime"].str.split("@", n = 1, expand = True)
data["date"] = newdata[0]
data["time"] = newdata[1]
data.head()
data["time"]= | pd.to_datetime(data["time"],format=' %I:%M %p' ) | pandas.to_datetime |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
res = pd.to_numeric(pd.Series(idx, name="xxx"))
tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
pd.to_numeric(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
# Basic dtype support.
smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0])
        # Support below np.float32 is rare.
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast="float")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_signed_downcast(self, data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data(self):
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors="ignore",
downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned(self):
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize("data,expected", [
(["1.1", 2, 3],
np.array([1.1, 2, 3], dtype=np.float64)),
([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
])
def test_ignore_downcast_cannot_convert_float(
self, data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = | pd.to_numeric(data, downcast=downcast) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
        # make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, | pd.Period('2011-01', freq='D') | pandas.Period |
import xenaPython as xena
import pandas as pd
GENES = ['FOXM1', 'TP53']
def get_codes(host, dataset, fields, data):
"get codes for enumerations"
codes = xena.field_codes(host, dataset, fields)
codes_idx = dict([(x['name'],
x['code'].split('\t')) for x in codes if x['code'] is not None])
for i in range(len(fields)):
if fields[i] in codes_idx:
data[i] = [None if v == 'NaN' else codes_idx[fields[i]][int(v)] for v in data[i]]
return data
def get_fields(host, dataset, samples, fields):
"get field values, column names in the spreadsheet"
data = xena.dataset_fetch(host, dataset, samples, fields)
return data
def get_fields_and_codes(host, dataset, samples, fields):
"get fields and resolve NA in the value"
return get_codes(host, dataset, fields, get_fields( host, dataset, samples, fields))
# dictionary with all hub links
xena.PUBLIC_HUBS
# pancanAtlas cohort
cohort = 'TCGA PanCanAtlas'
host = xena.PUBLIC_HUBS['pancanAtlasHub']
# get expression for GENES
expression_dataset = 'EB++AdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.xena'
samples = xena.dataset_samples(host, expression_dataset, None)
samples[0: 10]
expression = get_fields_and_codes(host,
expression_dataset,
samples,
GENES) # list of lists.
expression_by_gene = dict(zip(GENES, expression)) # index by gene.
[expression_by_gene.keys(), GENES[0], expression_by_gene[GENES[0]][0:10]]
# note that missing data is returned as 'NaN'. One might want to remap this to None or NaN, depending on the later analysis tools.
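# A minimal illustrative sketch (not part of the original analysis): remap the 'NaN'
# strings returned by the hub to float NaN so numeric tools can handle them. Assumes
# expression_by_gene maps gene -> flat list of floats or 'NaN' strings, as built above.
import math
expression_numeric = {
    gene: [float('nan') if v == 'NaN' else v for v in vals]
    for gene, vals in expression_by_gene.items()
}
# quick check: count of missing values per gene
{gene: sum(1 for v in vals if isinstance(v, float) and math.isnan(v))
 for gene, vals in expression_numeric.items()}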
# get disease type and survival columns
survival_dataset = 'Survival_SupplementalTable_S1_20171025_xena_sp'
fields = ['cancer type abbreviation', 'OS', 'OS.time']
values = get_fields_and_codes(host,
survival_dataset,
samples,
fields) # list of lists
phenotypes = dict(zip(fields, values)) # index by phenotype
# show all unique variable in the list phenotypes['cancer type abbreviation']
phenotype_index = set(phenotypes['cancer type abbreviation'])
print(phenotype_index)
# get sample type. TCGA includes a few "normal" tissue samples. These normals are of
# limited value because there are few of them, and they are not entirely normal, being
# taken from disease tissue, outside of the visible tumor. It's often best to omit them.
sampletype_dataset = 'TCGA_phenotype_denseDataOnlyDownload.tsv'
fields = ['sample_type']
values = get_fields_and_codes(host,
sampletype_dataset,
samples,
fields)
set(values[0])
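# A minimal illustrative sketch (not part of the original analysis): one way to act on
# the note above and drop the "normal" tissue samples. The assumption that normal
# samples carry 'Normal' in their sample_type label should be checked against
# set(values[0]) before relying on it.
sample_type = values[0]
tumor_samples = [s for s, t in zip(samples, sample_type)
                 if t is not None and 'Normal' not in t]
len(samples), len(tumor_samples)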
## has to use ['None'] to list all cohorts within the hub
## all_cohorts(host, exclude)
hub = 'https://pancanatlas.xenahubs.net'
xena.all_cohorts(hub, ['None'])
xena.cohort_summary(hub, ['None']) # count datasets per cohort
## Dataset metadata for datasets in the given cohorts
xena.dataset_list(hub, ['TCGA Pan-Cancer (PANCAN)'])
## use dataset ID in Xena website
dataset = 'EB++AdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.xena'
gene = ["VEGFA"]
## samples = xena.dataset_samples(hub, dataset, None)
samples = xena.dataset_samples(hub, dataset, 10)
## get expression for gene
expression = xena.dataset_gene_probes_values(hub, dataset, samples, gene)
expression_by_gene = dict([(g['gene'], g['scores'][0]) for g in expression])
[expression_by_gene.keys(), gene[0], expression_by_gene[gene[0]][0:10]]
## retrieve sample type value
## NOTE: different hub
hub = 'https://pancanatlas.xenahubs.net'
dataset = 'TCGA_phenotype_denseDataOnlyDownload.tsv'
fields = ['_primary_disease', 'sample_type']
# _sample_type will identify normals.
# _primary_disease will identify cancer study group
values = get_fields_and_codes(hub, dataset, samples, fields) # list of lists
phenotypes = dict(zip(fields, values)) # index by phenotype
phenotypes['_primary_disease'][0:10]
toil_summary = {
'samples': samples,
'expression': expression_by_gene,
'phenotypes': phenotypes
}
# list of field (probes) and probeMap,
# a probeMap is used to map a gene location to a set
# of probes (all RNA-seq counts at a given genomic location)
xena.dataset_field(hub, dataset)
# Exon counts in gene, and probe genomic positions, for given samples
lst = xena.dataset_gene_probes_values(hub, dataset, samples, gene)
# create a Pandas dataframe from probes_value list
data1 = pd.DataFrame(lst[0])
data2 = | pd.DataFrame(lst[1]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': nan, 'B': '3'}]
tm.assertIsInstance(recons_data, list)
self.assertEqual(len(recons_data), 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k2][k])
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
self.assertEqual(test_data.to_dict(orient='records'),
expected_records)
self.assertEqual(test_data_mixed.to_dict(orient='records'),
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
self.assertEqual(df.to_records()['index'][0], df.index[0])
rs = df.to_records(convert_datetime64=False)
self.assertEqual(rs['index'][0], df.index.values[0])
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
self.assertTrue('bar' in r)
self.assertTrue('one' not in r)
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <<EMAIL>>\n'
'To: <<EMAIL>>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = | DataFrame.from_records([headers]) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_context_history_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
def test_context_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3, 3, 2, 1], 'column2': [2, 3, 1, 1, 2, 3]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
#################################################
# Test contextual predict() method
################################################
def test_context_arm_list_int(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_arm_list_str(self):
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, -2, 2, 3, 11], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, -5, 2, 3, 10], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, -2, 4, 3, 9], [20, 19, 18, 17, 16], [1, 2, 1, 1, 3],
[17, 18, 17, 19, 18]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions= | pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]) | pandas.Series |