"""
Survey response summary calculation
The calculation builds up a nested dict structure, which makes referencing columns
and specific values easy. This is then converted to an array structure so that the
Django Template Language can render it on screen.
The results nested dict looks something like
{u'demographics_group/age': {
'label': u'How old are you?',
'options': {
u'under_25': {'count': {'female': 53,
'male': 11},
'idx': 0,
'key': u'under_25',
'label': u'Under 25 years old',
'pct': {'female': 26.903553299492383,
'male': 10.2803738317757}},
u'26_40': {'count': {'female': 101,
'male': 56},
'idx': 1,
'key': u'26_40',
'label': u'26 - 40 years old',
'pct': {'female': 51.26903553299492,
'male': 52.336448598130836}},
where the top-level keys are the group path names followed by the question name,
separated by '/', and options are under the options field keyed on option name.
The results array looks something like
[{'key': u'demographics_group/age',
'label': u'How old are you?',
'options': [{'count': {'female': 53,
'male': 11},
'idx': 0,
'key': u'under_25',
'label': u'Under 25 years old',
'pct': {'female': 26.903553299492383,
'male': 10.2803738317757}},
where questions are in an array and each question's options are
in an array under the question's options field.
Assumptions:
- that the gender column is always called 'demographics_group/gender'
and its values are always 'female' or 'male'
- that the _uuid column always exists and is required
"""
from logging import getLogger
from xform import SelectOne, SelectAllThatApply
import pandas
# These are used for grouping. Trying to count them and group by them
# at the same time doesn't work.
SKIP_QUESTIONS = [['_uuid'], ['demographics_group', 'gender']]
SKIP_GROUP_NAMES = ['meta', 'formhub']
GENDER_COLUMN = 'demographics_group/gender'
log = getLogger(__name__)
def count_submissions(submissions, gender_disagg=True):
if not len(submissions):
return {'total': 0, 'male': 0, 'female': 0}
results = {}
results['total'] = int(submissions.loc[:, ['_uuid']].count())
if gender_disagg:
cols = ['_uuid', GENDER_COLUMN]
question_table = submissions.loc[:, cols]
gender_counts = question_table.groupby(
[GENDER_COLUMN]
).count()
for gender in ('female', 'male'):
if gender in gender_counts.index:
results[gender] = int(gender_counts.loc[gender])
else:
results[gender] = 0
return results
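# Illustrative example (hypothetical two-column frame matching the assumptions above):
#
#   >>> df = pandas.DataFrame({'_uuid': ['a', 'b', 'c'],
#   ...                        'demographics_group/gender': ['female', 'female', 'male']})
#   >>> count_submissions(df)
#   {'total': 3, 'female': 2, 'male': 1}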
def count_options(submissions, children, path=None, group_labels=None, results=None, gender_disagg=True):
"""
returns nested dicts where the keys are the names of the XForm element
branches to each question and each option of a question. Only multiple
choice questions are supported.
"""
gender_disagg = gender_disagg or False
path = path or [] # list of names in structure leading to current element
group_labels = group_labels or [] # list of labels in groups in path
results = results or {} # results nested dict under construction
for child in children:
deeper_path = path + [child['name']]
if deeper_path in SKIP_QUESTIONS:
pass
elif child.get('type') == 'group' and child['name'] in SKIP_GROUP_NAMES:
pass
elif child.get('type') == 'group':
label = child.get('label')
if not label:
label = child['name'].replace('_', ' ').capitalize()
deeper_group_labels = group_labels + [label]
results = count_options(submissions, child['children'],
deeper_path, deeper_group_labels, results, gender_disagg=gender_disagg)
elif child.get('type') == 'select one':
control = child.get('control', None)
if control:
if control.get('appearance') == 'label':
continue
question = SelectOne(child, path, group_labels)
results = count_select_one(submissions, question, results, gender_disagg)
elif child.get('type') == 'select all that apply':
question = SelectAllThatApply(child, path, group_labels)
results = count_select_all_that_apply(submissions, question, results, gender_disagg)
else:
pass
return results
def count_select_one(submissions, q, results, gender_disagg):
"""
assumes distinct values in question column as per formhub csvwriter
"""
select_counts = count_select_one_selections(submissions, q, gender_disagg)
results = deep_set(results, [q.pathstr, 'label'], q.label)
results = deep_set(results, [q.pathstr, 'group_labels'], q.group_labels)
response_counts = count_select_one_responses(submissions, q, gender_disagg)
results = set_response_counts(q, results, response_counts, gender_disagg)
for idx in range(len(q.options)):
opt = q.options[idx]
results = deep_set(results, [q.pathstr, 'options', opt.name, 'label'], opt.label)
results = deep_set(results, [q.pathstr, 'options', opt.name, 'idx'], idx)
results = set_select_one_selection_counts(q, opt, results, select_counts, gender_disagg)
return results
def count_select_all_that_apply(submissions, q, results, gender_disagg):
"""
assumes column per option as per formhub csvwriter
"""
results = deep_set(results, [q.pathstr, 'label'], q.label)
results = deep_set(results, [q.pathstr, 'group_labels'], q.group_labels)
response_counts = count_select_all_that_apply_responses(submissions, q, gender_disagg)
results = set_response_counts(q, results, response_counts, gender_disagg)
for idx in range(len(q.options)):
opt = q.options[idx]
select_counts = count_select_all_that_apply_selections(submissions, opt, gender_disagg)
results = deep_set(results, [q.pathstr, 'options', opt.name, 'label'], opt.label)
results = deep_set(results, [q.pathstr, 'options', opt.name, 'idx'], idx)
results = set_select_all_that_apply_selection_counts(q, opt, results, select_counts, gender_disagg)
return results
def count_select_one_selections(submissions, question, gender_disagg):
if gender_disagg:
group_cols = [GENDER_COLUMN, question.pathstr]
else:
group_cols = [question.pathstr]
cols = ['_uuid'] + group_cols
question_table = submissions.loc[:, cols]
question_counts = question_table.groupby(group_cols).count()
return question_counts
def count_select_all_that_apply_selections(submissions, option, gender_disagg):
"""
assumes selecting this option gives value 'True' as per formhub csvwriter
"""
option_col = option.pathstr
if gender_disagg:
cols = [GENDER_COLUMN, option_col]
option_table = submissions.loc[:, cols]
option_chosen_table = option_table.where(submissions[option_col] == 'True')
option_counts = option_chosen_table.groupby([GENDER_COLUMN]).count()
else:
option_table = submissions.loc[:, [option_col]]
option_chosen_table = option_table.where(submissions[option_col] == 'True')
option_counts = option_chosen_table.count()  # no gender column here to group on
return option_counts
def count_select_one_responses(submissions, q, gender_disagg):
"""
assumes that an un-answered 'select one' question column is
set to 'n/a' as per formhub csvwriter
"""
if gender_disagg:
question_table = submissions.loc[:, [GENDER_COLUMN, q.pathstr]]
return question_table.where(submissions[q.pathstr] != 'n/a').groupby([GENDER_COLUMN]).count()
else:
question_table = submissions.loc[:, q.pathstr]
return question_table.where(submissions[q.pathstr] != 'n/a').count()
def count_select_all_that_apply_responses(submissions, q, gender_disagg):
"""
assumes that an un-answered 'select all that apply' question
has all option columns set to 'n/a' as per formhub csvwriter
"""
some_option = q.options[0]
question_table = submissions.loc[:, [GENDER_COLUMN, some_option.pathstr]]
return question_table.where(
submissions[some_option.pathstr] != 'n/a'
).groupby([GENDER_COLUMN]).count()
def set_select_all_that_apply_selection_counts(q, opt, results, option_table, gender_disagg):
if gender_disagg:
for gender in ['male', 'female']:
try:
val = int(option_table.loc[gender])
except KeyError:
# values that aren't counted because they don't occur in the
# results for this question won't be indexes in the counts
log.debug("Question %s option %s %s not found in counts DataFrame %s",
q.pathstr, gender, opt.name, option_table)
val = 0
results = deep_set(results, [q.pathstr, 'options', opt.name, 'count', gender], val)
try:
val = len(option_table)
except KeyError:
# values that aren't counted because they don't occur in the
# results for this question won't be indexes in the counts
log.debug("Question %s option %s %s not found in counts DataFrame %s",
q.pathstr, gender, opt.name, option_table)
val = 0
results = deep_set(results, [q.pathstr, 'options', opt.name, 'count', 'total'], val)
return results
def set_select_one_selection_counts(q, option, results, option_table, gender_disagg):
if gender_disagg:
for gender in ['male', 'female']:
try:
val = int(option_table.loc[gender, option.name])
except KeyError:
# values that aren't counted because they don't occur in the
# results for this question won't be indexes in the counts
log.debug("Question %s option %s %s not found in counts DataFrame %s",
q.pathstr, gender, option.name, option_table)
val = 0
results = deep_set(results, [q.pathstr, 'options', option.name, 'count', gender], val)
try:
val = int(option_table.loc[option.name])
except KeyError:
# values that aren't counted because they don't occur in the
# results for this question won't be indexes in the counts
log.debug("Question %s option %s not found in counts DataFrame %s",
q.pathstr, option.name, option_table)
val = 0
results = deep_set(results, [q.pathstr, 'options', option.name, 'count', 'total'], val)
return results
def set_response_counts(q, results, counts_table, gender_disagg):
if gender_disagg:
total = len(counts_table)
for gender in ['male', 'female']:
if gender in counts_table.index:
val = int(counts_table.loc[gender])
else:
val = 0
results = deep_set(results, [q.pathstr, 'response_count', gender], val)
else:
total = counts_table
results = deep_set(results, [q.pathstr, 'response_count', 'total'], total)
return results
def deep_set(deep_dict, path, value):
key = path[0]
if path[1:]:
if key in deep_dict:
deep_dict[key] = deep_set(deep_dict[key], path[1:], value)
else:
deep_dict[key] = deep_set({}, path[1:], value)
else:
deep_dict[key] = value
return deep_dict
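# Minimal sketch of what deep_set does (illustrative):
#
#   >>> deep_set({}, ['a', 'b', 'c'], 1)
#   {'a': {'b': {'c': 1}}}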
def combine_curr_hist(question_dict, prev_q_dict):
"""
Combines two question result dicts, updating 'question_dict'
to have 'current' and 'previous' results. The Question 'options'
dict is replaced with an option array in the order the options occurred
in the form.
e.g.
{"yes_no_group/bribe": {
"response_count": {
"total": 159, "male": 45, "female": 114
},
"options": [{"current": {
"count": {"male": 0, "female": 1},
"key": "yes",
"pct": {"male": 0.0, "female": 0.8771929824561403},
"idx": 0, "label": "Yes"
},
"prev": {"count": {"male": 0, "female": 0},
"""
for q_key, question in question_dict.iteritems():
options_dict = question['options']
options_arr = [None] * len(options_dict)
if (prev_q_dict and prev_q_dict.get(q_key)):
prev_q = prev_q_dict.get(q_key)
if not is_comparable(question, prev_q):
prev_q = None
else:
prev_q = None
for o_key, option in options_dict.iteritems():
option = options_dict[o_key]
option['key'] = o_key
options_arr[option['idx']] = {'current': option}
if prev_q:
prev_o = prev_q['options'].get(o_key)
if prev_o:
options_arr[option['idx']]['prev'] = prev_o
question['options'] = options_arr # overwrite
question['response_count'] = {'current': question['response_count']}
question['key'] = q_key
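# Note: combine_curr_hist mutates question_dict in place and returns None.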
def is_comparable(q1, q2):
"""
True if q1 and q2 have identical option keys
"""
return set(q1['options'].keys()) == set(q2['options'].keys())
def calc_q_percents(questions, gender_disagg=True):
"""
updates and returns a questions dict with percentages for option counts
"""
if gender_disagg:
counts = ['male', 'female']
else:
counts = ['total']
for q_key, question in questions.iteritems():
for o_key, option in question['options'].iteritems():
for count in counts:
select_count = float(option['count'][count])
response_count = float(question['response_count'][count])
if not (response_count or select_count):
pct = 0
else:
pct = (select_count / response_count) * 100
deep_set(option, ['pct', count], pct)
return questions
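# Worked example tied to the module docstring above: a 'pct' of 26.903553299492383
# for 53 female selections corresponds to 53 / 197.0 * 100, i.e. a female
# response count of 197 for that question.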
def cross_site_summary(result_sets):
""" Prepare a summary of responses from result sets from multiple sites.
"""
responses = []
# define these outside the loop scope so that whatever is set last is still usable after the loop
form = None
gender_disagg = None
site_totals = {}
facility_options = []
for result_set in result_sets:
# Assume that get_survey will make all surveys compatible
# and therefore the last-set form applies to all
form, site_responses = result_set.get_survey()
facility_options.extend(form.get_by_path('facility').get('children'))
gender_disagg = bool(form.get_by_path('demographics_group/gender'))
df = pandas.DataFrame(site_responses)
site_totals[result_set.site.id] = count_submissions(df, gender_disagg=gender_disagg)
responses.extend(site_responses)
if responses:
df = pandas.DataFrame(responses)
"""
.. module:: repository
:platform: Unix, Windows
:synopsis: A module for examining a single git repository
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import os
import sys
import datetime
import time
import numpy as np
import json
import logging
import tempfile
import shutil
from git import Repo, GitCommandError
from pandas import DataFrame, to_datetime
__author__ = 'willmcginnis'
class Repository(object):
"""
The base class for a generic git repository, from which to gather statistics. The object encapsulates a single
gitpython Repo instance.
:param working_dir: the directory of the git repository, meaning a .git directory is in it (default None=cwd)
:return:
"""
def __init__(self, working_dir=None, verbose=False):
self.verbose = verbose
self.log = logging.getLogger('gitpandas')
self.__delete_hook = False
self._git_repo_name = None
if working_dir is not None:
if working_dir[:3] == 'git':
if self.verbose:
print('cloning repository: %s into a temporary location' % (working_dir, ))
dir_path = tempfile.mkdtemp()
self.repo = Repo.clone_from(working_dir, dir_path)
self._git_repo_name = working_dir.split(os.sep)[-1].split('.')[0]
self.git_dir = dir_path
self.__delete_hook = True
else:
self.git_dir = working_dir
self.repo = Repo(self.git_dir)
else:
self.git_dir = os.getcwd()
self.repo = Repo(self.git_dir)
if self.verbose:
print('Repository [%s] instantiated at directory: %s' % (self._repo_name(), self.git_dir))
def __del__(self):
"""
On delete, clean up any temporary repositories still hanging around
:return:
"""
if self.__delete_hook:
if os.path.exists(self.git_dir):
shutil.rmtree(self.git_dir)
def is_bare(self):
"""
Returns a boolean indicating whether the repo is bare
:return: bool
"""
return self.repo.bare
def has_coverage(self):
"""
Returns a boolean indicating whether a parseable .coverage file can be found in the repository
:return: bool
"""
if os.path.exists(self.git_dir + os.sep + '.coverage'):
try:
with open(self.git_dir + os.sep + '.coverage', 'r') as f:
blob = f.read()
blob = blob.split('!')[2]
_ = json.loads(blob)
return True
except:
return False
else:
return False
def coverage(self):
"""
If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which
will contain the columns:
* filename
* lines_covered
* total_lines
* coverage
If it can't be found or parsed, an empty DataFrame of that form will be returned.
:return: DataFrame
"""
if not self.has_coverage():
return DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage'])
with open(self.git_dir + os.sep + '.coverage', 'r') as f:
blob = f.read()
blob = blob.split('!')[2]
cov = json.loads(blob)
ds = []
for filename in cov['lines'].keys():
idx = 0
with open(filename, 'r') as f:
for idx, l in enumerate(f):
pass
num_lines = idx + 1
short_filename = filename.split(self.git_dir + os.sep)[1]
ds.append([short_filename, len(cov['lines'][filename]), num_lines])
df = DataFrame(ds, columns=['filename', 'lines_covered', 'total_lines'])
df['coverage'] = df['lines_covered'] / df['total_lines']
return df
def commit_history(self, branch='master', limit=None, extensions=None, ignore_dir=None, days=None):
"""
Returns a pandas DataFrame containing all of the commits for a given branch. Included in that DataFrame will be
the columns:
* date (index)
* author
* committer
* message
* lines
* insertions
* deletions
* net
:param branch: the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param extensions: (optional, default=None) a list of file extensions to return commits for
:param ignore_dir: (optional, default=None) a list of directory names to ignore
:param days: (optional, default=None) number of days to return, if limit is None
:return: DataFrame
"""
# setup the data-set of commits
if limit is None:
if days is None:
ds = [[
x.author.name,
x.committer.name,
x.committed_date,
x.message,
self.__check_extension(x.stats.files, extensions, ignore_dir)
] for x in self.repo.iter_commits(branch, max_count=sys.maxsize)]
else:
ds = []
c_date = time.time()
commits = self.repo.iter_commits(branch, max_count=sys.maxsize)
dlim = time.time() - days * 24 * 3600
while c_date > dlim:
try:
if sys.version_info.major == 2:
x = commits.next()
else:
x = commits.__next__()
except StopIteration as e:
break
ds.append([
x.author.name,
x.committer.name,
x.committed_date,
x.message,
self.__check_extension(x.stats.files, extensions, ignore_dir)
])
c_date = x.committed_date
else:
ds = [[
x.author.name,
x.committer.name,
x.committed_date,
x.message,
self.__check_extension(x.stats.files, extensions, ignore_dir)
] for x in self.repo.iter_commits(branch, max_count=limit)]
# aggregate stats
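# (each row's last element is the per-file stats dict from __check_extension; collapse
#  it into four totals: lines, insertions, deletions, and net = insertions - deletions)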
ds = [x[:-1] + [sum([x[-1][key]['lines'] for key in x[-1].keys()]),
sum([x[-1][key]['insertions'] for key in x[-1].keys()]),
sum([x[-1][key]['deletions'] for key in x[-1].keys()]),
sum([x[-1][key]['insertions'] for key in x[-1].keys()]) - sum([x[-1][key]['deletions'] for key in x[-1].keys()])
] for x in ds if len(x[-1].keys()) > 0]
# make it a pandas dataframe
df = DataFrame(ds, columns=['author', 'committer', 'date', 'message', 'lines', 'insertions', 'deletions', 'net'])
# Imports
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import os.path
# ML dependency imports
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import train_test_split
from streamlit.type_util import Key
# Page Settings
st.set_page_config(page_title="California Wildfire ML", page_icon="./img/fav.png", initial_sidebar_state="collapsed")
#"""
#--------------------------
#---- MACHINE LEARNING ----
#--------------------------
#"""
def main():
print("IN MAIN")
# If data has not been cleaned, then clean it
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
print("CLEANING FIRE")
clean_fire()
if os.path.isfile("./data/clean/drought_data_clean.csv") == False:
print("CLEANING DROUGHT")
clean_drought()
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
print("CLEANING RAIN")
clean_percip()
# # Init sidebar with header text
# st.sidebar.header("Menu")
# # Add URL for github repository
# st.sidebar.write("[View on GitHub](https://github.com/josephchancey/ca-wildfire-ml)")
def old_fire_dataset():
unclean_fire = pd.read_csv("./data/fire_data.csv")
return unclean_fire
def old_precip_dataset():
unclean_precip = pd.read_csv("./data/precip_data.csv")
return unclean_precip
def old_drought_dataset():
unclean_drought = pd.read_csv("./data/drought_data.csv")
return unclean_drought
def clean_fire():
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
# import fire data csv
fireFile = "./data/fire_data.csv"
# read the file and store in a data frame
fireData = pd.read_csv(fireFile)
# remove extraneous columns
fireData = fireData[["incident_id","incident_name","incident_county","incident_acres_burned",
"incident_dateonly_created","incident_dateonly_extinguished"]]
# rename columns
fireData = fireData.rename(columns={"incident_id":"ID","incident_name":"Name","incident_county":"County",
"incident_acres_burned":"AcresBurned","incident_dateonly_created":"Started",
"incident_dateonly_extinguished":"Extinguished"})
# check for duplicates, then drop ID column
fireData.drop_duplicates(subset=["ID"])
fireData = fireData[["Name","County","AcresBurned","Started","Extinguished"]]
# create a column that contains the duration
# first convert date columns to datetime
fireData["Started"] = pd.to_datetime(fireData["Started"])
fireData["Extinguished"] = pd.to_datetime(fireData["Extinguished"])
# subtract the dates
fireData["Duration"] = fireData["Extinguished"] - fireData["Started"]
# convert duration to string and remove "days"
fireData["Duration"] = fireData["Duration"].astype(str)
fireData["Duration"] = fireData["Duration"].str.replace("days","")
# replace NaT with NaN and convert back to float
fireData["Duration"] = fireData["Duration"].replace(["NaT"],"NaN")
fireData["Duration"] = fireData["Duration"].astype(float)
# add one day to duration to capture fires that started and were extinguished in the same day
fireData["Duration"] = fireData["Duration"] + 1
# create a column for year and filter for fires during or after 2013
fireData["Year"] = fireData["Started"].dt.year
fireData = fireData.loc[(fireData["Year"]>=2013),:]
# create a column to hold the year and month of the start date
fireData["Date"] = fireData["Started"].apply(lambda x: x.strftime('%Y-%m'))
fireData = fireData[["Date", "County", "Duration", "AcresBurned"]]
# drop nulls
fireData = fireData.dropna()
# reset the index
fireData.reset_index(inplace=True,drop=True)
# export as csv
fireData.to_csv("./data/clean/fire_data_clean.csv",index=False)
return fireData
else:
# This prevents the cleaning from being ran each time this function is called, checks if cleaning is done already
fireData = pd.read_csv("./data/clean/fire_data_clean.csv")
return fireData
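# The cleaned fire table ends up with four columns: Date (YYYY-MM string), County,
# Duration (days as a float, including the +1 day adjustment) and AcresBurned.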
def clean_percip():
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
# import precipitation data csv
precipFile = "./data/precip_data.csv"
# read the file and store in a data frame
precipData = pd.read_csv(precipFile)
# remove extraneous columns
precipData = precipData[["Date","Location","Value"]]
# rename columns
precipData = precipData.rename(columns = {"Location":"County","Value":"Precip"})
# remove "county" from county column to be consistent with other datasets
precipData["County"] = precipData["County"].astype(str)
precipData["County"] = precipData["County"].str.replace(" County","")
# convert date column
precipData["Date"] = pd.to_datetime(precipData["Date"].astype(str), format='%Y%m')
# create a column for year and filter for data during or after 2013
precipData["Year"] = precipData["Date"].dt.year
precipData = precipData.loc[(precipData["Year"]>=2013),:]
# drop the year column
precipData = precipData[["Date","County","Precip"]]
# edit the date column to match the format of the other datasets
precipData["Date"] = precipData["Date"].apply(lambda x: x.strftime('%Y-%m'))
precipData = precipData.dropna()
precipData.reset_index(inplace=True,drop=True)
# export as csv
precipData.to_csv("./data/clean/precip_data_clean.csv",index=False)
return precipData
else:
precipData = pd.read_csv("./data/clean/precip_data_clean.csv")
return precipData
def clean_drought():
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
# import drought data csv
droughtFile = "./data/drought_data.csv"
# read the file and store in a dataframe
droughtData = pd.read_csv(droughtFile)
droughtData = droughtData[["ValidStart","County","None","D0","D1","D2",
"D3","D4"]]
# rename columns
droughtData = droughtData.rename(columns={"ValidStart":"Date"})
# remove "county" from county column to be consistent with other datasets
droughtData["County"] = droughtData["County"].astype(str)
droughtData["County"] = droughtData["County"].str.replace(" County","")
# edit the date column to match the format of the other datasets
droughtData["Date"] = pd.to_datetime(droughtData["Date"])
droughtData["Date"] = droughtData["Date"].apply(lambda x: x.strftime('%Y-%m'))
# drop nulls and reset the index
droughtData = droughtData.dropna()
droughtData.reset_index(inplace=True,drop=True)
# group by date and county and average the drought levels of each week to obtain a monthly summary
groupedDrought = droughtData.groupby(["Date","County"])
groupedDrought = groupedDrought.mean()
# export as csv
groupedDrought.to_csv("./data/clean/drought_data_clean.csv")
return groupedDrought
else:
groupedDrought = pd.read_csv("./data/clean/drought_data_clean.csv")
return groupedDrought
def lin_model():
print("MODEL RAN")
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
droughtML = pd.get_dummies(droughtMerged)
precipML = pd.get_dummies(precipMerged)
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df.drop(columns=["AcresBurned"])  # drop the target so it is not used as a feature
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
reg = LinearRegression().fit(X_train_scaled, y_train)
reg_score_val = reg.score(X_test_scaled, y_test)
return reg_score_val
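# Illustrative use inside the app (assumed, not part of the original flow): the returned
# R^2 score could be displayed with e.g. st.write("Linear regression score:", lin_model())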
def lasso_model():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df.drop(columns=["AcresBurned"])  # drop the target so it is not used as a feature
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
lasso = Lasso().fit(X_train_scaled, y_train)
lasso_score_val = lasso.score(X_test_scaled, y_test)
return lasso_score_val
def random_forest():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
masterML.drop(columns='None', inplace=True)
df = masterML
X = df.drop(columns=["AcresBurned"])  # drop the target so it is not used as a feature
y = df["AcresBurned"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_train, y_train)
random_forest_val = clf.score(X_test, y_test)
return random_forest_val
def plot_rnd_frst():
# import fire data
fireFile = "./data/clean/fire_data_clean.csv"
fireData = pd.read_csv(fireFile)
droughtFile = "./data/clean/drought_data_clean.csv"
droughtData = pd.read_csv(droughtFile)
precipFile = "./data/clean/precip_data_clean.csv"
precipData = pd.read_csv(precipFile)
droughtMerged = pd.merge(droughtData, fireData, on = ["Date", "County"])
precipMerged = pd.merge(precipData, fireData, on = ["Date","County"])
masterMerge = pd.merge(droughtMerged, precipData, on = ["Date","County"])
masterML = pd.get_dummies(masterMerge)
#!/usr/bin/env python
'''
The article dictionary ends up in this mongo format
comment_id : [<e|"#comment-68330010">]
author : [<content>]
author_id : [<content>]
reply_count : [<content>]
timestamp : [<content>]
reply_to_author : [<content>]
reply_to_comment : [<content>]
content : [<content>]
'''
import pandas as pd
def reverse_comments(article):
comment_df = pd.DataFrame(article['comments'])
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
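# e.g. for a 2-D object (ndim == 2), _axify(obj, [0, 1], 1) returns
# (slice(None, None, None), [0, 1]) -- a full slice on axis 0 and the key on axis 1.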
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
self.assertEqual(result, 1)
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_loc_setitem_slice(self):
# GH10503
# assigning the same type should not change the type
df1 = DataFrame({'a': [0, 1, 1],
'b': Series([100, 200, 300], dtype='uint32')})
ix = df1['a'] == 1
newb1 = df1.loc[ix, 'b'] + 1
df1.loc[ix, 'b'] = newb1
expected = DataFrame({'a': [0, 1, 1],
'b': Series([100, 201, 301], dtype='uint32')})
tm.assert_frame_equal(df1, expected)
# assigning a new type should get the inferred type
df2 = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
ix = df1['a'] == 1
newb2 = df2.loc[ix, 'b']
df1.loc[ix, 'b'] = newb2
expected = DataFrame({'a': [0, 1, 1], 'b': [100, 200, 300]},
dtype='uint64')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
s.loc[4:5] += 1
expected = Series([1, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a': [0, 1, 2]})
expected = df.copy()
with catch_warnings(record=True):
expected.ix[[0, 1, 2], 'a'] = -expected.ix[[0, 1, 2], 'a']
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]]
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
with catch_warnings(record=True):
df['a'].ix[[0, 1, 2]] = -df['a'].ix[[0, 1, 2]].astype(
'float64') + 0.5
expected = DataFrame({'a': [0.5, -0.5, -1.5], 'b': [0, 1, 2]})
tm.assert_frame_equal(df, expected)
# GH 8607
# ix setitem consistency
df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580],
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
expected = DataFrame({'timestamp': pd.to_datetime(
[1413840976, 1413842580, 1413760580], unit='s'),
'delta': [1174, 904, 161],
'elapsed': [7673, 9277, 1470]})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
df2.loc[:, 'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
df2 = df.copy()
with catch_warnings(record=True):
df2.ix[:, 2] = pd.to_datetime(df['timestamp'], unit='s')
tm.assert_frame_equal(df2, expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if is_scalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'))
for key in [slice(1, 3), tuple([slice(0, 2), slice(0, 2)]),
tuple([slice(0, 2), df.columns[0:2]])]:
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex,
tm.makeTimedeltaIndex]:
df.index = index(len(df.index))
with catch_warnings(record=True):
df.ix[key]
self.assertRaises(TypeError, lambda: df.loc[key])
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABCD'),
index=pd.date_range('2012-01-01', periods=5))
for key in ['2012-01-03',
'2012-01-31',
slice('2012-01-03', '2012-01-03'),
slice('2012-01-03', '2012-01-04'),
slice('2012-01-03', '2012-01-06', 2),
slice('2012-01-03', '2012-01-31'),
tuple([[True, True, True, False, True]]), ]:
# getitem
# if the expected raises, then compare the exceptions
try:
with catch_warnings(record=True):
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda: df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
with catch_warnings(record=True):
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1, 2, 3, 4], index=list('abde'))
result1 = s['a':'c']
with catch_warnings(record=True):
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
# now work rather than raising KeyError
s = Series(range(5), [-2, -1, 1, 2, 3])
with catch_warnings(record=True):
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
tm.assert_series_equal(result1, result2)
with catch_warnings(record=True):
result1 = s.ix[0:3]
result2 = s.loc[0:3]
tm.assert_series_equal(result1, result2)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame(
{'me': list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5, dtype='float64') * 1.34 + 2,
'bar2': np.arange(5, dtype='float64') * -.34 + 2}).set_index('me')
indexer = tuple(['r', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['r', 'bar'])
df = df_orig.copy()
df.loc[indexer] *= 2.0
self.assertEqual(df.loc[indexer], 2.0 * df_orig.loc[indexer])
indexer = tuple(['t', ['bar', 'bar2']])
df = df_orig.copy()
df.loc[indexer] *= 2.0
tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ['B', 'B']
del df['A']
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
drop=True)
tm.assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from
# 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result, expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5 * [np.zeros(3)], 'B': 5 * [np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2, 'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['ints', 'uints'], axes=0)
self.check_result('int label', 'loc', 3, 'ix', 3,
typs=['ints', 'uints'], axes=1)
self.check_result('int label', 'loc', 4, 'ix', 4,
typs=['ints', 'uints'], axes=2)
self.check_result('int label', 'loc', 2, 'ix', 2,
typs=['label'], fails=KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['labels'],
axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs=['mixed'],
axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs=['mixed'], axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1,
typs=['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs=['empty'],
fails=KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['ints', 'uints', 'labels', 'mixed', 'ts'],
fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f',
typs=['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['ints', 'uints', 'mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20,
typs=['labels'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['ts'],
axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs=['floats'],
axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0, 2, 4], 'ix', [0, 2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('list lbl', 'loc', [3, 6, 9], 'ix', [3, 6, 9],
typs=['ints', 'uints'], axes=1)
self.check_result('list lbl', 'loc', [4, 8, 12], 'ix', [4, 8, 12],
typs=['ints', 'uints'], axes=2)
self.check_result('list lbl', 'loc', ['a', 'b', 'd'], 'ix',
['a', 'b', 'd'], typs=['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A', 'B', 'C'], 'ix',
['A', 'B', 'C'], typs=['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z', 'Y', 'W'], 'ix',
['Z', 'Y', 'W'], typs=['labels'], axes=2)
self.check_result('list lbl', 'loc', [2, 8, 'null'], 'ix',
[2, 8, 'null'], typs=['mixed'], axes=0)
self.check_result('list lbl', 'loc',
[Timestamp('20130102'), Timestamp('20130103')], 'ix',
[Timestamp('20130102'), Timestamp('20130103')],
typs=['ts'], axes=0)
self.check_result('list lbl', 'loc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['empty'], fails=KeyError)
self.check_result('list lbl', 'loc', [0, 2, 3], 'ix', [0, 2, 3],
typs=['ints', 'uints'], axes=0, fails=KeyError)
self.check_result('list lbl', 'loc', [3, 6, 7], 'ix', [3, 6, 7],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [4, 8, 10], 'ix', [4, 8, 10],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_list_fails(self):
# fails
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=1, fails=KeyError)
self.check_result('list lbl', 'loc', [20, 30, 40], 'ix', [20, 30, 40],
typs=['ints', 'uints'], axes=2, fails=KeyError)
def test_loc_getitem_label_array_like(self):
# array like
self.check_result('array like', 'loc', Series(index=[0, 2, 4]).index,
'ix', [0, 2, 4], typs=['ints', 'uints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3, 6, 9]).index,
'ix', [3, 6, 9], typs=['ints', 'uints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4, 8, 12]).index,
'ix', [4, 8, 12], typs=['ints', 'uints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result('bool', 'loc', b, 'ix', b,
typs=['ints', 'uints', 'labels',
'mixed', 'ts', 'floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs=['empty'],
fails=KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2, 4), 'ix', [2, 4],
typs=['ints', 'uints'], axes=0)
self.check_result('int slice2', 'loc', slice(3, 6), 'ix', [3, 6],
typs=['ints', 'uints'], axes=1)
self.check_result('int slice2', 'loc', slice(4, 8), 'ix', [4, 8],
typs=['ints', 'uints'], axes=2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
with catch_warnings(record=True):
expected = df.ix[6:8, :]
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
with catch_warnings(record=True):
expected = df.ix[20:30, :]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
with catch_warnings(record=True):
expected = df.ix[10, :]
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
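# Editorial note (not part of the original suite): the GH 3053 cases above
# work because .loc treats integer labels like any other labels, so a slice
# such as df.loc[6:8, :] is inclusive of both endpoints, unlike positional
# slicing with .iloc.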
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__,
tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[-1])
self.assertRaises(KeyError, lambda: s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda: s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1, -2]]
expected = Series([3, np.nan], index=[-1, -2])
tm.assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda: s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value'])
def f():
df.loc[[3], :]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
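# Editorial note: the two f() helpers document that .loc raises KeyError for
# a list made up entirely of missing labels, whether written as df.loc[[3]]
# or df.loc[[3], :] -- .loc never falls back to positional indexing.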
def test_at_to_fail(self):
# at should not fallback
# GH 7814
s = Series([1, 2, 3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: s.at[0])
df = DataFrame({'A': [1, 2, 3]}, index=list('abc'))
result = df.at['a', 'A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda: df.at['a', 0])
s = Series([1, 2, 3], index=[3, 2, 1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: s.at['a'])
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = df.at[1, 0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda: df.at['a', 0])
# GH 13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({'x': [1.], 'y': [2.], 'z': [3.]})
df.columns = ['x', 'x', 'z']
# Check that we get the correct value in the KeyError
self.assertRaisesRegexp(KeyError, r"\['y'\] not in index",
lambda: df[['x', 'y', 'z']])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1, 3),
'ix', slice(1, 3),
typs=['labels', 'mixed', 'empty', 'ts', 'floats'],
fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a', 'c'),
'ix', slice('a', 'c'), typs=['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A', 'C'),
'ix', slice('A', 'C'), typs=['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W', 'Z'),
'ix', slice('W', 'Z'), typs=['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102', '20130104'),
'ix', slice('20130102', '20130104'),
typs=['ts'], axes=2, fails=TypeError)
# GH 14316
self.check_result('ts slice rev', 'loc', slice('20130104', '20130102'),
'indexer', [0, 1, 2], typs=['ts_rev'], axes=0)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 8), 'ix', slice(2, 8),
typs=['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2, 4, 2), 'ix', slice(
2, 4, 2), typs=['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(
np.random.rand(4, 4), columns=['A', 'B', 'C', 'D'],
index=['A', 'B', 'C', 'D'])
# want this to work
result = df.loc[:, "A":"B"].iloc[0:2, :]
self.assertTrue((result.columns == ['A', 'B']).all())
self.assertTrue((result.index == ['A', 'B']).all())
# mixed type
result = DataFrame({'a': [Timestamp('20130101')], 'b': [1]}).iloc[0]
expected = Series([Timestamp('20130101'), 1], index=['a', 'b'], name=0)
tm.assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similary for setitem and loc when rows have a null-slice
expected = DataFrame({'date': Series(0, index=range(5),
dtype=np.int64),
'val': Series(range(5), dtype=np.int64)})
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(
range(5), dtype=np.int64)})
df.loc[:, 'date'] = 0
tm.assert_frame_equal(df, expected)
df = DataFrame({'date': date_range('2000-01-01', '2000-01-5'),
'val': Series(range(5), dtype=np.int64)})
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:24:17 2019
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from io import StringIO
import math
df=pd.read_csv('file:///C:/Users/<NAME>/Desktop/Parkinsons/parkinsonsdisease/Data1.csv')
print(df.describe())
x=df.iloc[:,0:18].values
y=df.iloc[:,19].values
print("\nPrinting Shape of X :\n",x.shape)
print("\nPrinting Shape of Y :\n",y.shape)
print("\nTotal No of Unique Values :\n ",np.unique(y).sum())
print("\nPrinting Unique Values of Y :\n",np.unique(y))
print("\nNumber of Attritube :\n",x.shape[1])
print("\nNumber of Instance :\n",x.shape[0])
#Missing Values
print("\nChecking Null Values in our Parkinsons Dataset :\n")
print(df.isnull().sum())
print("\nAs we can see there is no Null Values in our Datasets\n")
print("Lets Suppose if we had Null Values,\nThen we can either use 'Drop' or 'Imputer Method' to correct it\n")
'''
df=pd.read_csv(StringIO(csv_data))
dl=df.dropna() ##it will drop the values which wil contain null values
print("after dropping\n",dl)
from sklearn.preprocessing import Imputer
imr=Imputer(missing_values='NaN',strategy='mean',axis=0) #it will calculate the avg of no in rows wise and replace the nan value with that no
imr=imr.fit(df)
imputed_data=imr.transform(df.values)
print(imputed_data)
'''
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.25,random_state=0)
print("\nScaling the features : ")
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
x_train_scaled = scaler.fit_transform(x_train)
x_train = pd.DataFrame(x_train_scaled)
x_test_scaled = scaler.fit_transform(x_test)
x_test = pd.DataFrame(x_test_scaled)
print("\nBefore Applying Training and Testing Split,\nShape of x was :\n",np.shape(x))
print("\nAfter Applying Training and Testing Split,\nShape of x is :")
print("Training Data :",np.shape(x_train))
print("Testing Data :",np.shape(x_test))
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
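# Note: the features end up scaled twice (MinMaxScaler above, then
# StandardScaler here); a single scaler fitted on the training split is
# normally sufficient.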
ch='y'
while(ch=='y'):
print("\nChoose Which Model You Want To Apply :")
s= int(input("1. Linear Regression \n2. DecisionTreeRegressor \n3. SVR \n4.KNeighborsRegressor"))
if s==1:
error = 1000
ind = 0
l=['l1','l2','l3']
for i in l:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
reg = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)
reg.fit(x_train,y_train)
y_pred=reg.predict(x_test)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
if rmse < error:
error = rmse
ind = i
print("\nAccuracy Of LinearRegressor :")
print('Misclassified Samples: %d'%(y_test!=y_pred).sum())
print(100 - error, "Accuracy With Best Value Of Estimator Is ", ind)
print("\n\nPress y to continue and n to quit\n")
ch=(input())
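# Note: the loop over l=['l1','l2','l3'] above refits an identical
# LinearRegression three times; the list entries are never passed to the
# model, so they do not act as regularisation settings.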
if s==2:
error=1000
ind=0
rmse_val = []
#l=["F1","F2","F3"]
for i in range(10):
i=i+1
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=i,random_state=0)
tree.fit(x_train,y_train)
y_pred=tree.predict(x_test)
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print('Accuracy Of DecisionTreeRegressor for Max Depth' , i , 'is:', 100-rmse)
if rmse < error:
error = rmse
ind = i
print("\nAccuracy Of Decision Tree Regressor :")
print('Misclassified Samples: %d'%(y_test!=y_pred).sum())
print(100 - error, " Accuracy With Best Value Of Estimator Is ", ind)
print("\n\nPress y to continue and n to quit\n")
ch=(input())
if s==3:
error=1000
ind = 0
l = ['linear','rbf']
for i in l:
from sklearn.svm import SVR
classifier = SVR(kernel=i)
classifier.fit(x_train,y_train)
y_pred = classifier.predict(x_test)
from sklearn.metrics import mean_squared_error
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
print('Accuracy Of SVR for ' , i , 'is:', 100-rmse)
if rmse < error:
error = rmse
ind = i
#print best value of estimator
print("\nAccuracy Of SVR Regressor :")
print('Misclassified Samples: %d'%(y_test!=y_pred).sum())
print(100 - error, " Accuracy With Best Value Of Estimator Is ", ind)
print("\n\nPress y to continue and n to quit\n")
ch=(input())
if s==4:
rmse_val = [] #to store rmse values for different k
from sklearn.neighbors import KNeighborsRegressor
for K in range(5):
K = K+1
model = KNeighborsRegressor(n_neighbors = K)
model.fit(x_train, y_train) #fit the model
pred=model.predict(x_test) #make prediction on test set
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(y_test,pred)) #calculate rmse
rmse_val.append(rmse) #store rmse values
print('Accuracy Of KNeighborsRegressor for k= ' , K , 'is:', 100-rmse)
if K == 1 or rmse < error: #keep the best (lowest) rmse and its k
error = rmse
ind = K
print("\nAccuracy Of KNeighborsRegressor :")
print('Misclassified Samples: %d'%(y_test!=pred).sum())
print(100 - error, " Accuracy With Best Value Of Estimator Is ", ind)
curve = pd.DataFrame(rmse_val)
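# Sketch (assumes matplotlib.pyplot is available as plt, imported above): the
# stored RMSE values are typically shown as an elbow curve to pick k, e.g.
# curve.plot(legend=False)
# plt.xlabel('k'); plt.ylabel('RMSE'); plt.show()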
import csv
import os
import pandas as pd
import sanalytics.algorithms.utils as sau
import sanalytics.estimators.pu_estimators as pu
from sanalytics.estimators.utils import diff_df, join_df
from gensim.models.doc2vec import Doc2Vec
import joblib
from time import time
## Read arguments
while True:
finished = set(['.'.join(i.split(".")[:-1]) for i in [i for i in os.walk("outputcsvs/validation/")][0][2]])
info = pd.read_pickle("analysis/pu_learning/foldinfo.pkl")
info = info.loc[~info.index.isin(finished)].sample(1)
id = info.index.item()
val_fold = info.fold.item()
pu_type = info.putype.item()
classifier = info.clf.item()
params_1 = info.params_1.item()
params_2 = info.params_2.item()
print(id)
## Read training/validation folds
X_train = pd.read_parquet("datasets/folds/fold{}_train.parquet".format(val_fold))
X_val = pd.read_parquet("datasets/folds/fold{}_val.parquet".format(val_fold))
## Step 1: Extract RN
P = X_train.loc[X_train.label=="security"].copy()
U = X_train.loc[X_train.label=="unlabelled"].copy()
C1, fit_time = pu.NC(params_1).fit(X_train)
UL1, pred_time = C1.predict(U)
Q1 = UL1.loc[UL1.predicts == "nonsecurity"].copy()
RN1 = Q1
U1 = diff_df(U,Q1)
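# (Comment added for clarity) Step 1 is the first stage of two-step PU
# learning: classifier C1 is trained on the positives P against the
# unlabelled pool U, and whatever it predicts as "nonsecurity" is kept as the
# reliable negatives RN1 that seed the second-stage classifier below.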
## Prepare CSV to write to
df_rows = []
columns = ["id", "recall", "prec_lower", "prec_opt", "f1_lower", "f1_opt", "f_measure", "fit_time", "predict_time", "eval_time"]
filename = 'outputcsvs/validation/{}'.format(id)
## BLANK means Step 1 only
if classifier=="BLANK":
results, eval_time = C1.evaluate(X_val)
row = [id] + list(results) + [fit_time, 0, eval_time]
df_rows.append(row)
pd.DataFrame(df_rows, columns=columns).to_csv("{}.csv".format(filename), index=False)
joblib.dump(C1, r'{}.pkl'.format(filename), compress = 1)
print(row)
continue
## Get estimator
pu_est = pu.get_estimator(classifier, params_2)
print(pu_est.model)
## Step 2: Train Classifier Iteratively
if (pu_type == '0'):
C2, fit_time = pu_est.fit(pd.concat([P, RN1], sort=False))
results, eval_time = C2.evaluate(X_val)
row = [id] + list(results) + [fit_time, 0, eval_time]
df_rows.append(row)
pd.DataFrame(df_rows, columns=columns).to_csv("{}.csv".format(filename), index=False)
joblib.dump(C2, r'{}.pkl'.format(filename), compress = 1)
print(row)
if (pu_type == '1'):
while True:
C2, fit_time = pu_est.fit(pd.concat([P, RN1], sort=False))
UL2, pred_time = C2.predict(U1)
Q2 = UL2.loc[UL2.predicts=="nonsecurity"].copy()
if len(Q2) == 0: break
U2 = diff_df(U1, Q2)
RN2 = join_df(RN1, Q2)
RN1 = RN2
U1 = U2
C1 = C2
results, eval_time = C2.evaluate(X_val)
row = [id] + list(results) + [fit_time, pred_time, eval_time]
df_rows.append(row)
pd.DataFrame(df_rows, columns=columns).to_csv("{}.csv".format(filename), index=False)
joblib.dump(C1, r'{}.pkl'.format(filename), compress = 1)
print(row)
if (pu_type == '2'):
while True:
C2, fit_time = pu_est.fit(pd.concat([P, RN1], sort=False))
RNL2, pred_time = C2.predict(RN1)
Q2 = RNL2.loc[RNL2.predicts=="nonsecurity"].copy()
# NOTE: the stopping condition below is commented out, so this branch loops
# until interrupted; re-enable the check to let it terminate on its own.
# if len(Q2) >= len(Q1) and len(P) >= len(RN1):
# break
RN2 = Q2
RN1 = RN2
C1 = C2
results, eval_time = C2.evaluate(X_val)
row = [id] + list(results) + [fit_time, pred_time, eval_time]
df_rows.append(row)
pd.DataFrame(df_rows, columns=columns).to_csv("{}.csv".format(filename), index=False)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
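# Quick illustration of the behaviour exercised above (editorial): factorize
# assigns codes in order of appearance, and sort=True renumbers them so the
# returned uniques are sorted, e.g.
# pd.factorize(['b', 'a', 'b']) -> codes [0, 1, 0], uniques ['b', 'a']
# pd.factorize(['b', 'a', 'b'], sort=True) -> codes [1, 0, 1], uniques ['a', 'b']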
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Subway Module
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import io
import re
from bokeh import io as bkio
from bokeh import models as bkm
import geopy.distance as gpd
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import seaborn as sns
try:
from nyc_signature import keys
except ModuleNotFoundError:
print('A Google API Key is required to generate the geographic images.')
print('Upon instancing the Stations class please assign your key to the '
'api_key attribute.')
from nyc_signature import locations
from nyc_signature.utils import ax_formatter, size, save_fig
class Stations:
"""
Class to describe New York city subway stations.
.. _`Google API Key`: https://developers.google.com/maps/documentation/
javascript/get-api-key
.. note:: A `Google API Key`_ is required to create the geographic plots.
:Attributes:
- **api_key**: *str* Google API Key
- **data**: *DataFrame* New York City subway station data
- **data_types**: *dict* data types for each column
- **data_url**: *str* link to web page containing the source data
- **hosp**: *Hospitals* instance of locations.Hospitals class
- **hosp_dist**: *DataFrame* distance from hospitals to subway stations
- **hosp_prox**: *DataFrame** hospital and subway stations in close \
proximity
- **hosp_stations**: *DataFrame* subway stations closest to hospitals
- **map_glyph_hospital**: *Circle* map glyph for hospital points
- **map_glyph_subway**: *Diamond* map glyph for subway points
- **map_options**: *dict* Bokeh plot map options
- **map_plot**: *GMapPlot* Bokeh Google Map Plot object
- **trains**: *DataFrame* New York City subway train data
"""
def __init__(self):
try:
self.api_key = keys.GOOGLE_API_KEY
except NameError:
self.api_key = None
self.data = None
self.data_url = ('https://timothyhelton.github.io/assets/data/'
'nyc_subway_locations.csv')
self.data_types = {
'division': str,
'line': str,
'name': str,
'latitude': np.float64,
'longitude': np.float64,
'route_1': str,
'route_2': str,
'route_3': str,
'route_4': str,
'route_5': str,
'route_6': str,
'route_7': str,
'route_8': str,
'route_9': str,
'route_10': str,
'route_11': str,
'entrance_type': str,
'entry': str,
'exit_only': str,
'vending': str,
'staffing': str,
'staff_hours': str,
'ada': str,
'ada_notes': str,
'free_crossover': str,
'north_south_street': str,
'east_west_street': str,
'corner': str,
'entrance_latitude': np.float64,
'entrance_longitude': np.float64,
'station_location': str,
'entrance_location': str,
}
self.hosp = locations.Hospitals()
self.hosp_dist = None
self.hosp_prox = None
self.hosp_stations = None
self.map_glyph_hospital = bkm.Circle(
x='longitude',
y='latitude',
fill_alpha=0.8,
fill_color='#cd5b1b',
line_color=None,
size=14,
)
self.map_glyph_subway = bkm.Diamond(
x='longitude',
y='latitude',
fill_color='#3062C8',
line_color=None,
size=10,
)
self.map_options = {
'lat': 40.70,
'lng': -73.92,
'map_type': 'roadmap',
'zoom': 10,
}
self.map_plot = bkm.GMapPlot(
api_key=self.api_key,
x_range=bkm.Range1d(),
y_range=bkm.Range1d(),
map_options=bkm.GMapOptions(**self.map_options),
plot_width=400,
plot_height=600,
)
self.trains = None
self.load_data()
def __repr__(self):
return f'Stations()'
def load_data(self):
"""
Load data from file.
"""
self.data = pd.read_csv(self.data_url,
dtype=self.data_types,
header=0,
names=self.data_types.keys())
not_categories_cols = (
'name',
'latitude',
'longitude',
'north_south_street',
'east_west_street',
'entrance_latitude',
'entrance_longitude',
'station_location',
'entrance_location',
)
categories_cols = [x for x in self.data_types.keys()
if x not in not_categories_cols]
for col in categories_cols:
self.data.loc[:, col] = (self.data.loc[:, col]
.astype('category'))
self.trains = pd.melt(self.data,
id_vars=['latitude', 'longitude'],
value_vars=[f'route_{x}' for x in range(1, 12)],
var_name='route',
value_name='train')
self.trains.loc[:, 'train'] = (self.trains.loc[:, 'train']
.str.strip())
for col in ('route', 'train'):
self.trains.loc[:, col] = (self.trains.loc[:, col]
.astype('category'))
def hospital_distances(self):
"""
Distances from subway stations to hospitals in NYC.
"""
hospital_locs = np.array(self.hosp.hospitals
.loc[:, ['latitude', 'longitude']])
stations = (self.data
.loc[:, ['name', 'latitude', 'longitude']]
.drop_duplicates(['latitude', 'longitude'])
.reset_index(drop=True))
subway_locs = np.array(stations.loc[:, ['latitude', 'longitude']])
distances = np.empty((hospital_locs.shape[0], subway_locs.shape[0]))
for hosp_n, h in enumerate(hospital_locs):
for sub_n, s in enumerate(subway_locs):
distances[hosp_n, sub_n] = gpd.vincenty(h, s).miles
self.hosp_dist = pd.DataFrame(distances,
index=self.hosp.hospitals.name)
self.hosp_dist = pd.concat([(stations
.loc[:, ['latitude', 'longitude']]
.T),
self.hosp_dist])
self.hosp_dist.columns = stations.name
self.hosp_dist['min_dist'] = (
self.hosp_dist
.drop(['latitude', 'longitude'], axis=0)
.apply(lambda x: x.min(), axis=1))
self.hosp_prox = (self.hosp_dist
.sort_values('min_dist')
.idxmin(axis=1))
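# Rough sanity check (assumes the geopy version imported above still exposes
# vincenty): distances are in miles, e.g. two Manhattan points about a mile
# and a half apart:
# gpd.vincenty((40.7128, -74.0060), (40.7306, -73.9866)).miles # ~1.6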
def hospital_proximity_plot(self, number=10, stations=None):
"""
Plot hospital and subway stations of interest
.. warning:: This method requires a Google API Key
:param int number: number of hospitals to query
:param list stations: only the stations supplied will be plotted
"""
def find_hospital_locs(interest_loc):
"""
Find specific hospital locations
:param interest_loc: hospital names
:return: hospital locations only for areas of interest
:rtype: pandas.DataFrame
"""
return (self.hosp.hospitals
.loc[(self.hosp.hospitals
.name.isin(interest_loc.index))])
if self.hosp_prox is None:
self.hospital_distances()
hosp_interest = self.hosp_prox[:number]
hospital_locs = find_hospital_locs(hosp_interest)
station_idx = (self.hosp_dist
.loc[hosp_interest.index, :]
.sort_values('min_dist')
.T
.reset_index(drop=True)
.T
.idxmin(axis=1))
self.hosp_stations = (self.hosp_dist
.iloc[:, station_idx]
.loc[['latitude', 'longitude'], :]
.T
.reset_index())
if stations:
idx = (self.hosp_stations[(self.hosp_stations
.name
.isin(stations))]
.index
.tolist())
self.hosp_stations = (self.hosp_stations.iloc[idx, :])
hosp_interest = hosp_interest.iloc[idx]
hospital_locs = find_hospital_locs(hosp_interest)
plot = self.map_plot
plot.title.text = ('New York City Hospitals and Subway Stations of '
'Interest')
hospitals = bkm.sources.ColumnDataSource(hospital_locs)
plot.add_glyph(hospitals, self.map_glyph_hospital)
subway_stations = bkm.sources.ColumnDataSource(self.hosp_stations)
plot.add_glyph(subway_stations, self.map_glyph_subway)
hover = bkm.HoverTool()
hover.tooltips = [
('Location', '@name'),
]
plot.add_tools(
hover,
bkm.PanTool(),
bkm.WheelZoomTool(),
)
bkio.output_file('stations_interest.html')
bkio.show(plot)
def stations_plot(self):
"""
Plot subway stations.
.. warning:: This method requires a Google API Key
"""
plot = self.map_plot
plot.title.text = 'New York City Subway Stations'
subway_stations = bkm.sources.ColumnDataSource(
data=(self.data
.loc[:, ['name', 'latitude', 'longitude']]
.join(self.data.loc[:, 'entrance_type']
.astype(str))
.join(self.data.loc[:, 'exit_only']
.astype(str)
.str.replace('nan', 'No'))
.drop_duplicates())
)
plot.add_glyph(subway_stations, self.map_glyph_subway)
hover = bkm.HoverTool()
hover.tooltips = [
('Location', '@name'),
('Entrance Type', '@entrance_type'),
('Exit Only', '@exit_only'),
]
plot.add_tools(
hover,
bkm.PanTool(),
bkm.WheelZoomTool(),
)
bkio.output_file('stations.html')
bkio.show(plot)
def stations_locations_plot(self):
"""
Plot subway stations and interest locations.
.. warning:: This method requires a Google API Key
"""
plot = self.map_plot
plot.title.text = 'New York City Hospitals and Subway Stations'
hospitals = bkm.sources.ColumnDataSource(self.hosp.hospitals)
plot.add_glyph(hospitals, self.map_glyph_hospital)
subway_stations = bkm.sources.ColumnDataSource(
data=(self.data
.loc[:, ['name', 'latitude', 'longitude']]
.drop_duplicates())
)
plot.add_glyph(subway_stations, self.map_glyph_subway)
hover = bkm.HoverTool()
hover.tooltips = [
('Location', '@name'),
]
plot.add_tools(
hover,
bkm.PanTool(),
bkm.WheelZoomTool(),
)
bkio.output_file('stations_locations.html')
bkio.show(plot)
def train_plot(self, save=False):
"""
Plot subway stations by train.
:param bool save: if True the figure will be saved
"""
sns.lmplot(x='longitude', y='latitude',
data=(self.trains
.sort_values(by='train')),
hue='train', fit_reg=False, legend=False, markers='d',
scatter_kws={'alpha': 0.3}, size=10)
legend = plt.legend(bbox_to_anchor=(1.10, 0.5), fancybox=True,
fontsize=size['legend'], loc='center right',
shadow=True, title='Train')
plt.setp(legend.get_title(), fontsize=size['label'])
plt.xlabel('Longitude', fontsize=size['label'])
plt.ylabel('Latitude', fontsize=size['label'])
plt.tight_layout()
plt.suptitle('New York Subway Train Stations',
fontsize=size['super_title'], y=1.03)
save_fig('stations_trains', save)
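# Example usage sketch (assumes a valid Google API key is configured in
# nyc_signature.keys; the methods are those defined above):
# stations = Stations()
# stations.hospital_distances()
# stations.hospital_proximity_plot(number=5)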
class Turnstile:
"""
Class to investigate the New York City subway turnstile data
:Attributes:
**available_data_files**: *pandas.Series* url address for available data \
files to be scraped from the NYC MTA website
**current_station**: *str* name of current station
**daily_use**: *pandas.DataFrame** average turnstile daily use for the \
current station
**data**: *pandas.DataFrame* NYC turnstile data
**date_end**: *str* end date for analyzing target locations
**data_files**: *list* names of all available data files to download from \
the url attribute
**date_start**: *str* start date for analyzing target locations
**data_text**: *str* scraped text of NYC turnstile data
**data_types**: *dict* data types for NYC turnstile data
**days**: *tuple* weekday names
**request**: *requests.models.Response* response object from scraping \
the url attribute
**target_data**: *pandas.DataFrame* filter turnstile data to only show \
the requested number of target subway stations
**targets**: *pandas.DataFrame* hospitals in close proximity to subway \
stations
**top_stations**: *pandas.DataFrame* the smallest number of target subway \
stations which comprise up to 90% of all turnstile use
**url**: *str* web address for turnstile data
"""
def __init__(self):
self.url = 'http://web.mta.info/developers/turnstile.html'
self.request = requests.get(self.url)
self.current_station = None
self.daily_use = None
self.data = None
self.data_files = None
self.data_text = None
self.data_types = {
'ctrl_area': str,
'unit': str,
'scp': str,
'station': str,
'line_name': str,
'division': str,
'date': str,
'time': str,
'description': str,
'entry': np.int32,
'exit': np.int32,
}
self.days = (
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday',
)
self.target_data = None
self.targets = None
self.top_stations = None
self.available_data_files()
self.date_start = self.data_files.index[5].strftime('%Y-%m-%d')
self.date_end = self.data_files.index[1].strftime('%Y-%m-%d')
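# Assumption: available_data_files() leaves data_files sorted newest-first,
# so index positions 1 and 5 bracket roughly the most recent month of
# turnstile postings used as the default analysis window.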
def __repr__(self):
return f'Turnstile()'
def average_daily_use(self, station_name):
"""
Determine the average turnstile use per day for a station.
:param str station_name: station name
"""
if self.target_data is None:
self.get_targets()
self.current_station = station_name
station = self.target_data.query(f'station == "{station_name}"')
self.daily_use = (station
.groupby([station.index.weekday,
station.index.time])
.mean())
def daily_use_plot(self, station_name, save=False):
"""
Plot average daily turnstile use.
:param str station_name: station name
:param bool save: if True the figure will be saved
"""
if self.target_data is not station_name:
self.average_daily_use(station_name)
fig = plt.figure('Station By Day',
figsize=(12, 12), facecolor='white',
edgecolor='black')
rows, cols = (3, 1)
ax0 = plt.subplot2grid((rows, cols), (0, 0))
ax1 = plt.subplot2grid((rows, cols), (1, 0), sharex=ax0, sharey=ax0)
ax2 = plt.subplot2grid((rows, cols), (2, 0), sharex=ax0, sharey=ax0)
bar_plot = (self.daily_use
.loc[(slice(0, 7),
slice(pd.to_datetime('09:00:00'),
"""Expression Atlas."""
import logging
import os
import sys
from collections import OrderedDict
from typing import List, Tuple, Optional
import pandas as pd
from pandas.core.frame import DataFrame
import xmltodict
from pyorient import OrientDB
from tqdm import tqdm
from ebel.constants import DATA_DIR
from ebel.manager.orientdb import odb_meta, urls
from ebel.manager.rdbms.models import expression_atlas
from ebel.tools import get_standard_name
logger = logging.getLogger(__name__)
class ExpressionAtlas(odb_meta.Graph):
"""ExpressionAtlas."""
def __init__(self, client: OrientDB = None):
"""Init ExpressionAtlas."""
self.client = client
self.biodb_name = 'expression_atlas'
self.urls = {'latest_data': urls.EXPRESSION_ATLAS_EXPERIMENTS}
self.data_dir = os.path.join(DATA_DIR, self.biodb_name)
super().__init__(tables_base=expression_atlas.Base,
urls=self.urls,
biodb_name=self.biodb_name)
def __len__(self):
return self.number_of_generics
def __contains__(self, item):
# TODO: To be implemented
return True
def update(self):
"""Update ExpressionAtlas."""
logger.info("Update ExpressionAtlas")
downloaded = self.download()
if downloaded['latest_data']:
self.extract_files()
self.insert()
def extract_files(self):
"""Extract relevant files."""
os.chdir(self.data_dir)
cmd_temp = "tar -xzf atlas-latest-data.tar.gz --wildcards --no-anchored '{}'"
patterns = ['*.sdrf.txt',
'*.condensed-sdrf.tsv',
'*analytics.tsv',
'*-configuration.xml',
'*.idf.txt',
'*.go.gsea.tsv',
'*.interpro.gsea.tsv',
'*.reactome.gsea.tsv'
]
with tqdm(patterns) as t_patterns:
for pattern in t_patterns:
t_patterns.set_description(f"Extract files with pattern {pattern}")
command = cmd_temp.format(pattern)
os.system(command)
def insert_data(self):
"""Class method."""
pass
def update_interactions(self) -> int:
"""Class method."""
pass
def insert_experiment(self, experiment_name: str, title: str) -> int:
"""Insert individual experiment into SQL database.
Parameters
----------
experiment_name : str
Name of the Expression Atlas experiment.
title : str
Title of experiment.
Returns
-------
Table ID of newly inserted experiment.
"""
experiment = expression_atlas.Experiment(name=experiment_name, title=title)
self.session.add(experiment)
self.session.flush()
self.session.commit()
return experiment.id
def insert(self):
"""Override insert method for SQL data insertion."""
self.recreate_tables()
data_folder = os.scandir(self.data_dir)
for experiment_name in tqdm([(f.name) for f in data_folder if f.is_dir()]):
try:
df_configuration = self.get_configuration(experiment_name)
if isinstance(df_configuration, pd.DataFrame):
df_idf = self.get_idf(experiment_name)
title = df_idf[df_idf.key_name == 'investigation_title'].value.values[0]
experiment_id = self.insert_experiment(experiment_name, title)
groups_strs: Tuple[str, ...] = self.__insert_configuration(df_configuration, experiment_id)
self.__insert_idf(df_idf, experiment_id)
self.__insert_sdrf_condensed(experiment_id, experiment_name)
self.__insert_foldchange(experiment_id, experiment_name, groups_strs)
self.insert_gseas(experiment_id, experiment_name, groups_strs)
except Exception as e:
print(experiment_name)
print(e)
sys.exit()
def __insert_foldchange(self, experiment_id: int, experiment_name: str, groups_strs: Tuple[str, ...]):
df_log2foldchange = self.get_log2foldchange(experiment_name, groups_strs).set_index('group_comparison')
df_group_comparison = self.get_df_group_comparison(experiment_id, groups_strs).set_index('group_comparison')
df_log2foldchange.join(df_group_comparison).to_sql(expression_atlas.FoldChange.__tablename__,
self.engine, if_exists='append', index=False)
def get_df_group_comparison(self, experiment_id: int, groups_strs: Tuple[str, ...]) -> pd.DataFrame:
"""Get group comparison IDs and group comparison columns for pairs of group strings.
Parameters
----------
experiment_id : int
Experiment numerical ID.
groups_strs : tuple
Pairs of gene symbols.
Returns
-------
Pandas DataFrame of 'group_comparison_id' and 'group_comparison'.
"""
data = []
for groups_str in groups_strs:
group_comparison_id = self.session.query(expression_atlas.GroupComparison.id).filter_by(
experiment_id=experiment_id,
group_comparison=groups_str).first().id
data.append((group_comparison_id, groups_str))
return pd.DataFrame(data, columns=['group_comparison_id', 'group_comparison'])
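# Example usage sketch (assumes an OrientDB/e(BE:L) setup as expected by
# odb_meta.Graph):
# atlas = ExpressionAtlas()
# atlas.update() # download, extract and insert the latest experiments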
# -*- coding: utf-8 -*-
#
# wxtruss
# License: MIT License
# Author: <NAME>
# E-mail: <EMAIL>
# ~ from __future__ import division
import wx
import wx.grid as grid
import wx.html as html
import numpy as np
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
import matplotlib.lines as mlines
import wxtruss.iconos as ic
from nusa import * # FEA library
import webbrowser
import pandas as pd
import json
import os
# For versioning
dir_setup = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_setup, '', 'version.py')) as f:
# Defines __version__
exec(f.read())
class wxTruss(wx.Frame):
def __init__(self,parent):
title = "wxTruss {version}".format(version=__version__)
wx.Frame.__init__(self,parent,title=title,size=(900,600))
self.init_menu()
self.init_ctrls()
self.init_model_data()
self.SetBackgroundColour("#FFFFFF")
self.SetIcon(ic.wxtruss.GetIcon())
self.Centre(True)
self.Show()
def init_ctrls(self):
self.mainsz = wx.BoxSizer(wx.HORIZONTAL)
self.upsz = wx.BoxSizer(wx.HORIZONTAL)
self.figsz = wx.BoxSizer(wx.VERTICAL)
self.toolbar = Toolbar(self)
self.toolbar.Realize()
self.upsz.Add(self.toolbar, 0, wx.ALIGN_LEFT)
# Creating figures, axes and canvas
self._set_mpl()
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.mpl_toolbar = NavigationToolbar(self.canvas)
self.mpl_toolbar.Realize()
self.figsz.Add(self.canvas, 12, wx.EXPAND|wx.ALL, 2)
self.figsz.Add(self.mpl_toolbar, 1, wx.EXPAND|wx.ALL, 20)
#~ self.figure.set_facecolor("w")
self.txtout = HTMLWindow(self)
#~ self.txtout.SetMinSize((200,-1))
#~ self.txtout.SetPage("<html></html>")
self.upsz.Add(self.figsz, 1, wx.EXPAND|wx.ALL, 2)
self.mainsz.Add(self.upsz, 5, wx.EXPAND)
self.mainsz.Add(self.txtout, 3, wx.EXPAND)
self.SetSizer(self.mainsz)
# toolbar events
self.Bind(wx.EVT_TOOL, self.add_nodes, self.toolbar.nodes_tool)
self.Bind(wx.EVT_TOOL, self.add_elements, self.toolbar.elements_tool)
self.Bind(wx.EVT_TOOL, self.add_constraints, self.toolbar.constraints_tool)
self.Bind(wx.EVT_TOOL, self.add_forces, self.toolbar.forces_tool)
self.Bind(wx.EVT_TOOL, self.plot_model, self.toolbar.plot_model_tool)
self.Bind(wx.EVT_TOOL, self.solve_model, self.toolbar.solve_tool)
self.Bind(wx.EVT_TOOL, self.plot_deformed_shape, self.toolbar.plot_deformed_shape_tool)
def _set_mpl(self):
matplotlib.rc('figure', facecolor="#ffffff")
matplotlib.rc('axes', facecolor="#ffffff", linewidth=0.1, grid=False)
# ~ matplotlib.rc('font', family="Times New Roman")
def init_menu(self):
m_file = wx.Menu()
new_model = m_file.Append(-1, "New model \tCtrl+N")
from_json = m_file.Append(-1, "Read model from Truss/JSON file... \tCtrl+J")
quit_app = m_file.Append(-1, "Quit \tCtrl+Q")
m_help = wx.Menu()
_help = m_help.Append(-1, "Help")
about = m_help.Append(-1, "About...")
menu_bar = wx.MenuBar()
menu_bar.Append(m_file, "File")
menu_bar.Append(m_help, "Help")
self.SetMenuBar(menu_bar)
self.Bind(wx.EVT_MENU, self.on_new_model, new_model)
self.Bind(wx.EVT_MENU, self.on_from_json, from_json)
self.Bind(wx.EVT_MENU, self.on_about, about)
self.Bind(wx.EVT_MENU, self.on_help, _help)
self.Bind(wx.EVT_MENU, self.on_quit, quit_app)
def init_model_data(self):
try:
self.read_model_from_json("data/exampsle_01.truss")
except:
# self.nodes = np.array([[0,0],[2,0],[0,2]])
# self.elements = np.array([[1,2,200e9,1e-4],[2,3,200e9,1e-4],[1,3,200e9,1e-4]])
# self.forces = np.array([[3,1000,0]])
# self.constraints = np.array([[1, 0, 0], [2, 0, 0]])
self.nodes = []
self.elements = []
self.forces = []
self.constraints = []
def isempty(self,arg):
if not arg:
return True
return False
def on_about(self,event):
AboutDialog(None)
def on_help(self,event):
print("Help unavailable")
def on_quit(self,event):
self.Close()
def on_new_model(self,event):
self.nodes = []
self.elements = []
self.forces = []
self.constraints = []
self.axes.cla()
self.txtout.SetPage("")
self.canvas.draw()
def on_from_json(self,event):
path = ""
wildcard = "Truss file (*.truss)|*.truss| JSON file (*.json)|*.json"
dlg = wx.FileDialog(self, message="Select a Truss/JSON file...",
defaultDir=os.getcwd(), wildcard=wildcard, style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
if not path:
wx.MessageBox('No file selected', 'wxTruss', wx.OK | wx.ICON_INFORMATION)
else:
self.read_model_from_json(path)
def build_model(self):
nc = self.nodes
ec = self.elements
x,y = nc[:,0], nc[:,1]
nodos = []
elementos = []
for k,nd in enumerate(nc):
cn = Node((x[k],y[k]))
nodos.append(cn)
for k,elm in enumerate(ec):
i,j,E,A = int(elm[0]-1),int(elm[1]-1),elm[2],elm[3]
ni,nj = nodos[i],nodos[j]
ce = Truss((ni,nj), E, A)
elementos.append(ce)
self.model = TrussModel("Truss Model")
for n in nodos: self.model.add_node(n)
for e in elementos: self.model.add_element(e)
for c in self.constraints:
k,ux,uy = int(c[0]),c[1],c[2]
if not np.isnan(ux) and not np.isnan(uy):
self.model.add_constraint(nodos[k-1], ux=ux, uy=uy)
elif not np.isnan(ux):
self.model.add_constraint(nodos[k-1], ux=ux)
elif not np.isnan(uy):
self.model.add_constraint(nodos[k-1], uy=uy)
for f in self.forces:
k,fx,fy = int(f[0]),f[1],f[2]
self.model.add_force(nodos[k-1],(fx,fy))
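# Illustrative sketch (not part of the original app) of the array layout that
# build_model() expects, mirroring the commented-out defaults in
# init_model_data(). Node numbers in the input arrays are 1-based and are
# converted to 0-based indices above; NaN in a constraint row means that
# degree of freedom is left free.
#
#   nodes       = np.array([[0, 0], [2, 0], [0, 2]])       # x, y per node
#   elements    = np.array([[1, 2, 200e9, 1e-4],
#                           [2, 3, 200e9, 1e-4],
#                           [1, 3, 200e9, 1e-4]])          # ni, nj, E, A
#   forces      = np.array([[3, 1000, 0]])                 # node, fx, fy
#   constraints = np.array([[1, 0, 0], [2, 0, 0]])         # node, ux, uy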
def solve_model(self,event):
self.build_model()
self.model.solve()
self.html_report()
def html_report(self):
m = self.model
NODES = [n.label+1 for n in m.get_nodes()]
ELEMENTS = [e.label+1 for e in m.get_elements()]
el = [e.get_nodes() for e in m.get_elements()]
ELEMENTS_CONN = [(ni.label+1,nj.label+1) for ni,nj in el]
NODAL_COORDS = [[n.x,n.y] for n in m.get_nodes()]
NODAL_DISPLACEMENTS = [[n.ux,n.uy] for n in m.get_nodes()]
NODAL_FORCES = [[n.fx,n.fy] for n in m.get_nodes()]
ELEMENT_FORCES = [e.f for e in m.get_elements()]
ELEMENT_STRESSES = [e.s for e in m.get_elements()]
EL_INFO = pd.DataFrame(ELEMENTS_CONN, columns=["Ni","Nj"], index=ELEMENTS)
import pandas as pd
import numpy as np
from suzieq.utils import SchemaForTable, humanize_timestamp, Schema
from suzieq.engines.base_engine import SqEngineObj
from suzieq.sqobjects import get_sqobject
from suzieq.db import get_sqdb_engine
from suzieq.exceptions import DBReadError, UserQueryError
import dateparser
from datetime import datetime
from pandas.core.groupby import DataFrameGroupBy
class SqPandasEngine(SqEngineObj):
def __init__(self, baseobj):
self.ctxt = baseobj.ctxt
self.iobj = baseobj
self.summary_row_order = []
self._summarize_on_add_field = []
self._summarize_on_add_with_query = []
self._summarize_on_add_list_or_count = []
self._summarize_on_add_stat = []
self._summarize_on_perdevice_stat = []
self._dbeng = get_sqdb_engine(baseobj.ctxt.cfg, baseobj.table, '',
None)
@property
def all_schemas(self) -> Schema:
return self.ctxt.schemas
@property
def schema(self) -> SchemaForTable:
return self.iobj.schema
@property
def cfg(self):
return self.iobj._cfg
@property
def table(self):
return self.iobj._table
def _get_ipvers(self, value: str) -> int:
"""Return the IP version in use"""
if ':' in value:
ipvers = 6
elif '.' in value:
ipvers = 4
else:
ipvers = ''
return ipvers
def _handle_user_query_str(self, df: pd.DataFrame,
query_str: str) -> pd.DataFrame:
"""Handle user query, trapping errors and returning exception
Args:
df (pd.DataFrame): The dataframe to run the query on
query_str (str): pandas query string
Raises:
UserQueryError: Exception if pandas query aborts with errmsg
Returns:
pd.DataFrame: dataframe post query
"""
if query_str:
if query_str.startswith('"') and query_str.endswith('"'):
query_str = query_str[1:-1]
try:
df = df.query(query_str).reset_index(drop=True)
except Exception as ex:
raise UserQueryError(ex)
return df
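# Minimal sketch (illustrative only, not part of suzieq): the user query string
# handled above is ultimately just a pandas DataFrame.query() expression, with
# any surrounding double quotes stripped off first.
#
#   df = pd.DataFrame({'hostname': ['leaf01', 'spine01'], 'mtu': [9216, 1500]})
#   df.query('mtu > 1500').reset_index(drop=True)   # keeps only the leaf01 row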
def get_valid_df(self, table: str, **kwargs) -> pd.DataFrame:
"""The heart of the engine: retrieving the data from the backing store
Args:
table (str): Name of the table to retrieve the data for
Returns:
pd.DataFrame: The data as a pandas dataframe
"""
if not self.ctxt.engine:
print("Specify an analysis engine using set engine command")
return pd.DataFrame(columns=["namespace", "hostname"])
# Thanks to things like OSPF, we cannot use self.schema here
sch = SchemaForTable(table, self.all_schemas)
phy_table = sch.get_phy_table_for_table()
columns = kwargs.pop('columns', ['default'])
addnl_fields = kwargs.pop('addnl_fields', [])
view = kwargs.pop('view', self.iobj.view)
active_only = kwargs.pop('active_only', True)
hostname = kwargs.get('hostname', [])
fields = sch.get_display_fields(columns)
key_fields = sch.key_fields()
drop_cols = []
if columns == ['*']:
drop_cols.append('sqvers')
aug_fields = sch.get_augmented_fields()
if 'timestamp' not in fields:
fields.append('timestamp')
if 'active' not in fields+addnl_fields:
addnl_fields.append('active')
drop_cols.append('active')
# Order matters. Don't put this before the missing key fields insert
for f in aug_fields:
dep_fields = sch.get_parent_fields(f)
addnl_fields += dep_fields
for fld in key_fields:
if fld not in fields+addnl_fields:
addnl_fields.insert(0, fld)
drop_cols.append(fld)
for f in addnl_fields:
if f not in fields:
# timestamp is always the last field
fields.insert(-1, f)
if self.iobj.start_time:
try:
start_time = int(dateparser.parse(
self.iobj.start_time.replace('last night', 'yesterday'))
.timestamp()*1000)
except Exception as e:
print(f"ERROR: invalid time {self.iobj.start_time}: {e}")
return pd.DataFrame()
else:
start_time = ''
if self.iobj.start_time and not start_time:
# Something went wrong with our parsing
print(f"ERROR: unable to parse {self.iobj.start_time}")
return pd.DataFrame()
if self.iobj.end_time:
try:
end_time = int(dateparser.parse(
self.iobj.end_time.replace('last night', 'yesterday'))
.timestamp()*1000)
except Exception as e:
print(f"ERROR: invalid time {self.iobj.end_time}: {e}")
return pd.DataFrame()
else:
end_time = ''
if self.iobj.end_time and not end_time:
# Something went wrong with our parsing
print(f"ERROR: Unable to parse {self.iobj.end_time}")
return pd.DataFrame()
table_df = self._dbeng.read(
phy_table,
'pandas',
start_time=start_time,
end_time=end_time,
columns=fields,
view=view,
key_fields=key_fields,
**kwargs
)
if not table_df.empty:
# hostname may not have been filtered if using regex
if hostname:
hdf_list = []
for hn in hostname:
df1 = table_df.query(f"hostname.str.match('{hn}')")
if not df1.empty:
hdf_list.append(df1)
if hdf_list:
table_df = pd.concat(hdf_list)
else:
return pd.DataFrame(columns=table_df.columns.tolist())
if view == "all" or not active_only:
table_df.drop(columns=drop_cols, inplace=True)
else:
table_df = table_df.query('active') \
.drop(columns=drop_cols)
if 'timestamp' in table_df.columns and not table_df.empty:
table_df['timestamp'] = humanize_timestamp(
table_df.timestamp, self.cfg.get('analyzer', {})
.get('timezone', None))
return table_df
def get(self, **kwargs) -> pd.DataFrame:
"""The default get method for all tables
Use this for a table if nothing special is desired. No table uses
this routine today.
Raises:
NotImplementedError: If no table has been defined
Returns:
pd.DataFrame: pandas dataframe of the object
"""
if not self.iobj.table:
raise NotImplementedError
user_query = kwargs.pop('query_str', '')
df = self.get_valid_df(self.iobj.table, **kwargs)
df = self._handle_user_query_str(df, user_query)
return df
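# Usage sketch (assumed calling convention, based only on the kwargs popped in
# get() and get_valid_df() above; the column names and filter values are made up):
#
#   df = engine.get(columns=['default'],
#                   hostname=['leaf.*'],
#                   query_str='mtu != 9216')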
def get_table_info(self, table: str, **kwargs) -> dict:
"""Returns information about the data available for a table
Used by table show command exclusively.
Args:
table (str): Name of the table about which info is desired
Returns:
dict: The desired data as a dictionary
"""
# You can't use the user-supplied view because we need to see all the data
# to compute the required values.
kwargs.pop('view', None)
all_time_df = self.get_valid_df(table, view='all', **kwargs)
times = all_time_df['timestamp'].unique()
ret = {'firstTime': all_time_df.timestamp.min(),
'latestTime': all_time_df.timestamp.max(),
'intervals': len(times),
'allRows': len(all_time_df),
'namespaces': self._unique_or_zero(all_time_df, 'namespace'),
'deviceCnt': self._unique_or_zero(all_time_df, 'hostname')}
return ret
def _get_table_sqobj(self, table: str, start_time: str = None,
end_time: str = None, view=None):
"""Normalize pulling data from other tables into this one function
Typically pulling data involves calling get_sqobject with a bunch of
parameters that need to be passed to it and that a caller can forget to
pass. A classic example is the view, start-time and end-time, which are
often forgotten. This function fixes that.
Args:
table (str): The table to retrieve the info from
start_time (str): Start time to use; defaults to this object's start time
end_time (str): End time to use; defaults to this object's end time
view (str): View to use; defaults to this object's view
"""
return get_sqobject(table)(
context=self.ctxt,
start_time=start_time or self.iobj.start_time,
end_time=end_time or self.iobj.end_time,
view=view or self.iobj.view)
def _unique_or_zero(self, df: pd.DataFrame, col: str) -> int:
"""Returns the unique count of a column in a dataframe or 0
Args:
df (pd.DataFrame): The dataframe to use
col (str): The column name to use
Returns:
int: Count of unique values
"""
if col in df.columns:
return df[col].nunique()
else:
return 0
def summarize(self, **kwargs):
"""Summarize the info about this resource/service.
There is a pattern of how to do these
use self._init_summarize():
- creates self.summary_df, which is the initial pandas dataframe
based on the table
- creates self.nsgrp of data grouped by namespace
- self.ns is the dict to add data to which will be turned into a
dataframe and then returned
if you simply want to take a field and run a pandas function on it, then use
self._add_field_to_summary
at the end of the summarize
return pd.DataFrame(self.ns).convert_dtypes()
If you don't override this, then you get a default summary of all columns
"""
self._init_summarize(self.iobj._table, **kwargs)
if self.summary_df.empty:
return self.summary_df
self._gen_summarize_data()
self._post_summarize()
return self.ns_df.convert_dtypes()
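# Sketch of the subclassing pattern described in the docstring above; only the
# attribute and helper names already mentioned there (and in the default body)
# are used, everything else is illustrative:
#
#   class MyTableEngine(SqPandasEngine):
#       def summarize(self, **kwargs):
#           self._init_summarize(self.iobj._table, **kwargs)
#           if self.summary_df.empty:
#               return self.summary_df
#           # ... fill self.ns, e.g. via self._add_field_to_summary ...
#           self._post_summarize()
#           return self.ns_df.convert_dtypes()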
def unique(self, **kwargs) -> pd.DataFrame:
"""Return the unique elements as per user specification
Raises:
ValueError: If len(columns) != 1
Returns:
pd.DataFrame: Pandas dataframe of unique values for given column
"""
count = kwargs.pop("count", 0)
query_str = kwargs.get('query_str', '')
columns = kwargs.pop("columns", None)
if query_str:
getcols = ['*']
else:
getcols = columns
column = columns[0]
df = self.get(columns=getcols, **kwargs)
if df.empty:
return df
# check if column we're looking at is a list, and if so explode it
if df.apply(lambda x: isinstance(x[column], np.ndarray), axis=1).all():
df = df.explode(column).dropna(how='any')
if not count:
return (pd.DataFrame({f'{column}': df[column].unique()}))
else:
r = df[column].value_counts()
return pd.DataFrame({column: r})
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
import pandas as pd
def add_empty_buildings(missing_list: list, dict_to_add_to: dict):
'''
Add missing buildings (with an empty dict as value) to a dict keyed by building,
so that the JSONs being saved always contain every building.
'''
for building in missing_list:
dict_to_add_to[building] = {}
return dict_to_add_to
def sort_dict(d):
items = [[k, v] for k, v in sorted(d.items(), key=lambda x: x[0])]
for item in items:
if isinstance(item[1], dict):
item[1] = sort_dict(item[1])
return dict(items)
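# Illustrative usage (not part of the original module) of the two helpers above:
# every building ends up present and the ordering of keys is deterministic.
#
#   buildings = {'B2': {'floors': 3}}
#   buildings = add_empty_buildings(['B3', 'B1'], buildings)
#   sort_dict(buildings)   # -> {'B1': {}, 'B2': {'floors': 3}, 'B3': {}}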
class Analysis():
config = dict()
analysis_parameters = dict()
def __str__(self):
return ' | '.join([self.config['analysis_name'], self.config['analysis_id']])
def __init__(self, config):
self.config = config
self.analysis_parameters = self.config['analysis_parameters']
def run(self, input_samples: dict, input_analyses: dict) -> dict:
raise NotImplementedError
def samples_dict_to_df(self, model_type: str, samples: dict, dates: list = None) -> pd.DataFrame:
if model_type == 'person':
df = pd.DataFrame(samples)
elif model_type == 'immunity':
df = pd.DataFrame(samples, columns=['immunity'])
elif model_type == 'prevalence':
df = pd.concat([pd.DataFrame(samples[i], index=dates)
for i in range(len(samples))],
keys=range(len(samples)))
df.columns = ['prevalence-' + c for c in df.columns]
elif model_type == 'action':
df = pd.concat([pd.DataFrame(samples[i], index=dates)
for i in range(len(samples))],
keys=range(len(samples)))
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
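# Sketch (assumed, not asserted about the real pandas test suite): a concrete
# test class mixes ParserTests in and supplies the actual readers, e.g.
#
#   class TestCParser(ParserTests, tm.TestCase):
#       def read_csv(self, *args, **kwds):
#           kwds = dict(kwds, engine='c')
#           return read_csv(*args, **kwds)
#
#       def read_table(self, *args, **kwds):
#           kwds = dict(kwds, engine='c')
#           return read_table(*args, **kwds)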
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
import os
import logging
from pprint import pprint
from typing import Dict
import scipy.signal
import numpy as np
import pandas as pd
from matplotlib import pyplot
from .log import logger
from .helpers import normalize, get_equidistant_signals
from .abstract_extractor import AbstractExtractor
from .synchronization_errors import ShakeMissingException, StartEqualsEndError
class Synchronizer:
@property
def extractor(self):
return self._extractor
@extractor.setter
def extractor(self, value):
if not issubclass(type(value), AbstractExtractor):
raise TypeError("Extractor needs to be a subclass of AbstractExtractor.")
self._extractor = value
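# The only contract the synchronizer relies on is extractor.get_segments(df),
# which must return, per column, a "first" and a "second" segment with
# "start"/"end" timestamps taken from df's index. A minimal conforming sketch
# (the class name and fixed window length are assumptions for illustration only):
#
#   class FixedWindowExtractor(AbstractExtractor):
#       def __init__(self, window=pd.Timedelta(seconds=10)):
#           self.window = window
#
#       def get_segments(self, df):
#           segments = {}
#           for column in df.columns:
#               idx = df[column].dropna().index
#               segments[column] = {
#                   "first": {"start": idx.min(), "end": idx.min() + self.window},
#                   "second": {"start": idx.max() - self.window, "end": idx.max()},
#               }
#           return segments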
def __init__(
self, sources, ref_source_name, extractor, sampling_freq=None, tags=None
):
self.sources = sources
self.ref_source_name = ref_source_name
self.extractor = extractor
self.ref_signals = self._prepare_ref_signals()
self.tags = tags
if sampling_freq is not None:
self.sampling_freq = sampling_freq
else:
self.sampling_freq = self.get_max_ref_frequency()
def truncate_data(self, buffer=300):
if self.tags is None:
return
before = self.tags.data.index.min() - pd.Timedelta(seconds=buffer)
after = self.tags.data.index.max() + pd.Timedelta(seconds=buffer)
self.ref_signals = self.ref_signals.truncate(before=before, after=after)
for source in self.sources.values():
source["data"] = source["data"].truncate(before=before, after=after)
def _prepare_ref_signals(self):
ref_signals = pd.DataFrame()
for source_name, source in self.sources.items():
signal = source["data"][source["ref_column"]].dropna()
ref_signals = ref_signals.join(signal, how="outer")
ref_signals.rename(
columns=(lambda x: source_name if x == source["ref_column"] else x),
inplace=True,
)
ref_signals = ref_signals.apply(normalize)
return ref_signals
def get_max_ref_frequency(self):
if self.ref_signals is None:
raise ValueError(
"Unable to get maximum frequency: Reference signals undefined."
)
frequencies = self.ref_signals.aggregate(Synchronizer._infer_freq)
return np.amax(frequencies)
@staticmethod
def _infer_freq(series):
index = series.dropna().index
timedeltas = index[1:] - index[:-1]
median = np.median(timedeltas)
return np.timedelta64(1, "s") / median
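# Worked example with hypothetical values: a series sampled every 20 ms has a
# median index timedelta of 0.02 s, so the inferred rate is
#   np.timedelta64(1, "s") / np.timedelta64(20, "ms")  ->  50.0  (Hz)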
@staticmethod
def _stretch_signals(source, factor, start_time=None):
"""Returns copy of DataFrame with stretched DateTimeIndex."""
df = source.copy()
if start_time is None:
start_time = df.index.min()
logger.debug("Use start time: {}".format(start_time))
timedelta = df.index - start_time
new_index = timedelta * factor + start_time
df.set_index(new_index, inplace=True, verify_integrity=True)
return df
@staticmethod
def _get_stretch_factor(segments, timeshifts):
old_length = segments["second"]["start"] - segments["first"]["start"]
new_length = old_length + timeshifts["second"] - timeshifts["first"]
stretch_factor = new_length / old_length
return stretch_factor
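# Worked example with hypothetical numbers: if the two shake segments start
# 100 s apart in the signal being aligned, and its per-segment timeshifts
# against the reference are first = +2 s and second = +3 s, then
#   new_length     = 100 s + 3 s - 2 s = 101 s
#   stretch_factor = 101 / 100 = 1.01
# i.e. the signal's timeline must be stretched by 1% to line up with the
# reference source.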
@staticmethod
def _get_timeshifts(dataframe, columns, segments):
"""Returns timeshifts to synchronize columns[1] with columns[0].
First signal in columns will be used as reference.
Expects equidistant sampled signals.
"""
timeshifts = {}
segment_names = ["first", "second"]
ref_column = columns[0]
sig_column = columns[1]
fig, axes = None, None
if logger.isEnabledFor(logging.INFO):
fig, axes = pyplot.subplots(1, 2, figsize=(15, 4))
# Check that all segments are available
for col in columns:
for segment in segment_names:
for part in ["start", "end"]:
try:
segments[col][segment][part]
except KeyError:
print("Dumping all detected segments:")
pprint(segments)
raise ShakeMissingException(
f"No {segment} shake detected for {col}, missing the {part}"
)
for index, segment in enumerate(segment_names):
logger.debug(
"Calculate timeshift of {} segment for {} to {}.".format(
segment, sig_column, ref_column
)
)
# get segments from both signals
ref_start = segments[ref_column][segment]["start"]
ref_end = segments[ref_column][segment]["end"]
ref_segment = dataframe[ref_column][ref_start:ref_end]
sig_start = segments[sig_column][segment]["start"]
sig_end = segments[sig_column][segment]["end"]
sig_segment = dataframe[sig_column][sig_start:sig_end]
# calculate cross-correlation of segments
cross_corr = scipy.signal.correlate(ref_segment, sig_segment)
# get shift in samples
shift_in_samples = np.argmax(cross_corr) - len(sig_segment) + 1
# get timestamp at which sig_segment must start to sync signals
max_corr_ts = dataframe.index[
dataframe.index.get_loc(ref_start, method="nearest") + shift_in_samples
]
logger.debug(
"Highest correlation with start at {} with {}.".format(
max_corr_ts, np.max(cross_corr)
)
)
# calculate timeshift to move signal to maximize correlation
timeshifts[segment] = max_corr_ts - sig_start
logger.debug("Timeshift is {}.".format(str(timeshifts[segment])))
# plot shifted segments
if logger.isEnabledFor(logging.INFO):
try:
df = dataframe.copy()
df[sig_column] = df[sig_column].shift(1, freq=timeshifts[segment])
if axes is not None:
axes[index].set_title(
"{} segment of {c[0]} and {c[1]}".format(segment, c=columns)
)
df[columns][ref_start:ref_end].plot(ax=axes[index])
except MemoryError:
logger.warning(
"Couldn't allocate enough memory to plot shifted segments, skipping"
)
if logger.isEnabledFor(logging.INFO):
try:
if fig is not None:
fig.tight_layout()
except MemoryError:
logger.warning(
"Couldn't allocate enough memory to plot shifted segments, skipping"
)
return timeshifts
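# Tiny numeric illustration of the shift computed above (hypothetical arrays):
#   ref_segment = [0, 0, 1, 0]   # peak at sample 2
#   sig_segment = [0, 1, 0, 0]   # peak at sample 1
#   cross_corr = scipy.signal.correlate(ref_segment, sig_segment)
#   np.argmax(cross_corr) - len(sig_segment) + 1  ->  +1
# so the signal segment has to start one sample after ref_start to maximise
# the correlation; that offset, converted to a Timedelta, is the timeshift.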
def _calculate_sync_params(self):
dataframe = self.ref_signals.copy()
start_time = self.ref_signals.index.min()
self.sources[self.ref_source_name]["timeshift"] = None
self.sources[self.ref_source_name]["stretch_factor"] = 1
# Interpolate and resample to equidistant signal
df_equi = get_equidistant_signals(self.ref_signals, self.sampling_freq)
segments = self.extractor.get_segments(df_equi)
# First round to determine stretch factor
for column in df_equi.columns:
if column == self.ref_source_name:
continue
else:
timeshifts = Synchronizer._get_timeshifts(
df_equi, [self.ref_source_name, column], segments
)
logger.debug(
"Timedelta between shifts before stretching: {}".format(
timeshifts["first"] - timeshifts["second"]
)
)
try:
self.sources[column][
"stretch_factor"
] = Synchronizer._get_stretch_factor(segments[column], timeshifts)
except ZeroDivisionError:
raise StartEqualsEndError(
"First and last segment have been identified as exactly the same. Bad window, maybe?"
)
logger.info(
"Stretch factor for {}: {}".format(
column, self.sources[column]["stretch_factor"]
)
)
# stretch signal and exchange it in dataframe
signal_stretched = Synchronizer._stretch_signals(
pd.DataFrame(dataframe[column]).dropna(),
self.sources[column]["stretch_factor"],
start_time,
)
dataframe = (
dataframe.drop(column, axis="columns")
.join(signal_stretched, how="outer")
.astype(pd.SparseDtype("float"))
)
# Resample again with stretched signal
df_equi = get_equidistant_signals(dataframe, self.sampling_freq)
segments = self.extractor.get_segments(df_equi)
# Second round to get timeshift for stretched signal
for column in df_equi.columns:
if column == self.ref_source_name:
continue
else:
timeshifts = Synchronizer._get_timeshifts(
df_equi, [self.ref_source_name, column], segments
)
timedelta = timeshifts["first"] - timeshifts["second"]
if timedelta > pd.Timedelta(0):
logger.warning(
"Timedelta between shifts after stretching: {}".format(
timedelta
)
)
logger.info("Timeshift for {}: {}".format(column, timeshifts["first"]))
self.sources[column]["timeshift"] = timeshifts["first"]
def get_sync_params(self, recalculate=False):
selected_keys = ["timeshift", "stretch_factor"]
if recalculate or "timeshift" not in self.sources[self.ref_source_name]:
self._calculate_sync_params()
return {
source_name: {
key: value for key, value in source.items() if key in selected_keys
}
for source_name, source in self.sources.items()
}
def get_synced_data(self, recalculate=False) -> Dict[str, pd.DataFrame]:
self.get_sync_params(recalculate)
synced_data = {}
start_time = self.ref_signals.index.min()
for source_name, source in self.sources.items():
data = source["data"].copy()
if source["stretch_factor"] != 1:
data = Synchronizer._stretch_signals(
data, source["stretch_factor"], start_time
)
if source["timeshift"] is not None:
data = data.shift(1, freq=source["timeshift"])
synced_data[source_name] = data
return synced_data
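# End-to-end usage sketch (source names, column names and the extractor
# instance are assumptions for illustration only):
#
#   sources = {
#       "chest": {"data": chest_df, "ref_column": "acc_x"},
#       "wrist": {"data": wrist_df, "ref_column": "acc_x"},
#   }
#   sync = Synchronizer(sources, ref_source_name="chest", extractor=my_extractor)
#   sync.get_sync_params()    # {'wrist': {'timeshift': ..., 'stretch_factor': ...}, ...}
#   synced = sync.get_synced_data()   # {'chest': DataFrame, 'wrist': DataFrame}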
def save_pickles(self, path) -> Dict[str, pd.DataFrame]:
"""
Save a pickled, synced dataframe for each source. Does not save a total table; sync parameters are written to SYNC.csv.
Returns the synced data, with the sync-parameter dataframe added under the key "SYNC".
"""
sync_params = pd.DataFrame(self.get_sync_params())
synced_data = self.get_synced_data()
sync_params.to_csv(os.path.join(path, "SYNC.csv"))
for source_name, synced_df in synced_data.items():
synced_df.to_pickle(os.path.join(path, f"{source_name.upper()}.PICKLE"))
return {**synced_data, "SYNC": sync_params}
def save_data(self, path, tables=None, save_total_table=True):
if "SYNC" in tables.keys():
raise ValueError(
"SYNC must not be one of the table names. It is reserved for the synchronization paramters."
)
if save_total_table and "TOTAL" in tables.keys():
raise ValueError(
"TOTAL must not be one of the table names, if the table with all data should be saved."
)
sync_params = self.get_sync_params()
synced_data = self.get_synced_data()
# Save sync params
pd.DataFrame(sync_params).to_csv(os.path.join(path, "SYNC.csv"))
# Save custom tables
logger.info(tables)
if tables is not None:
for table_name, table_spec in tables.items():
table_df = pd.DataFrame()
if self.tags is not None:
table_df = table_df.join(self.tags.data, how="outer")
for source_name, source_columns in table_spec.items():
# create dataframe for each source
source_df = pd.DataFrame()
import os
import torch
from nltk import sent_tokenize, word_tokenize
from collections import defaultdict
import json
import pandas as pd
import pickle
from nltk.tag.perceptron import PerceptronTagger
from nltk.stem.porter import *
from transformers import BertTokenizer, GPT2Tokenizer
from lemmagen3 import Lemmatizer
import LatvianStemmer
import langid
import multiprocessing
import numpy as np
from tqdm import tqdm
#unk_token = '<|endoftext|>'
#eos_token = '<|endoftext|>'
#pad_token = '<pad>'
eos_token = '<eos>'
unk_token = '[UNK]'
pad_token = '<pad>'
WORKERS = 1
def parse_from_cache(cache_stuff_df, idx, pos):
row = cache_stuff_df[cache_stuff_df['idxs'] == idx]
idx = row['idxs'].values[0]
words = row['words'].values[0].split(';')
if pos:
pos_tags = row['pos'].values[0].split(';')
return idx, words, pos_tags
return idx, words
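# Illustrative usage sketch (not part of the original pipeline): the cache layout
# assumed here -- columns 'idxs', 'words' and 'pos' holding ';'-joined strings --
# is inferred from the lookups in parse_from_cache above.
_demo_cache = pd.DataFrame({'idxs': [0], 'words': ['deep;learning;models'], 'pos': ['JJ;NN;NNS']})
_demo_idx, _demo_words, _demo_pos = parse_from_cache(_demo_cache, 0, pos=True)
# _demo_words -> ['deep', 'learning', 'models'], _demo_pos -> ['JJ', 'NN', 'NNS']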
def file_to_df(input_path, classification, bpe_mode = False):
all_docs = []
counter = 0
num_words = 0
with open(input_path, 'r', encoding='utf8') as f:
for line in f:
counter += 1
if counter % 10000 == 0:
print('Processing json: ', counter)
line = json.loads(line)
title = line.get('title') or ''
abstract = line.get('abstract') or ''
text = title + '. ' + abstract
if not classification:
fulltext = line.get("fulltext") or ''
text = text + ' ' + fulltext
num_words += len(text.split())
if bpe_mode:
all_docs.append([text,"",""])
continue
try:
kw = line['keywords']
except KeyError:
kw = []
try:
lang = line['lang']
except KeyError:
lang = langid.classify(text)[0]
if isinstance(kw, list):
kw = ";".join(kw)
all_docs.append([text,kw,lang])
df = pd.DataFrame(all_docs)
import pandas as pd
import numpy as np
import re
from unidecode import unidecode
# Map district in Kraków to integers.
# For details see:
# https://en.wikipedia.org/wiki/Districts_of_Krak%C3%B3w
districts = {'stare miasto': 1,
'grzegórzki': 2,
'prądnik czerwony': 3,
'prądnik biały': 4,
'krowodrza': 5,
'bronowice': 6,
'zwierzyniec': 7,
'dębniki': 8,
'łagiewniki': 9,
'borek fałęcki': 9,
'swoszowice': 10,
'podgórze duchackie': 11,
'bieżanów': 12,
'prokocim': 12,
'podgórze': 13,
'czyżyny': 14,
'mistrzejowice': 15,
'bieńczyce': 16,
'wzgórza krzesławickie': 17,
'nowa huta': 18}
# Remove polish characters from key names
for key in list(districts.keys()):
districts[unidecode(key)] = districts.pop(key)
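# Sanity check (illustrative only): after the normalisation loop above, only the
# ASCII spellings remain as keys, so accented input must go through unidecode().
assert districts[unidecode('prądnik biały')] == districts['pradnik bialy'] == 4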
# Translate data from polish to english.
translation = {'Cena': 'Price',
'Lokalizacja': 'Location',
'Data dodania': 'Date',
'Na sprzedaż przez': 'Seller',
'Rodzaj nieruchomości': 'Property',
'Liczba pokoi': 'Rooms',
'Liczba łazienek': 'Bathrooms',
'Wielkość (m2)': 'Area',
'Parking': 'Parking',
'Tytuł': 'Title',
'Opis': 'Description',
'Link': 'Link'}
def remove_polish_characters(x):
"""
Remove Polish characters
Examples
--------
>>> remove_polish_characters('ąćęłńóśźż')
'acelnoszz'
"""
if pd.isnull(x):
return x
else:
x = unidecode(x)
return x
def parse_price(x):
"""
Convert string with price to a integer value.
Parameters
----------
x : str
Row from price column.
Returns
-------
int :
Price of the property.
Example
-------
>>> parse_price('349\xa0000 zł')
349000
>>> parse_price('349 000 zł')
349000
>>> parse_price('349\xa0000')
349000
>>> parse_price('349000')
349000
>>> parse_price(349000)
349000
>>> parse_price(349000.1235)
349000
>>> parse_price(np.nan)
nan
>>> parse_price('Proszę o kontakt')
nan
"""
if pd.isnull(x):
return x
else:
if isinstance(x, str):
x = x.replace('\xa0', '')
x = x.replace('zł', '')
x = x.replace(' ', '')
x = x.strip()
try:
x = int(x)
except ValueError:
x = np.nan
return x
elif isinstance(x, int):
return x
elif isinstance(x, float):
x = int(x)
return x
else:
return np.nan
def extract_currency(x):
"""
Extract currency from the price column.
Examples
--------
>>> extract_currency('123000zł')
'pln'
"""
if pd.isnull(x):
# ********************************************************************************** #
# #
# Project: Data Frame Explorer #
# Author: <NAME> #
# Contact: <EMAIL>(a)gmail.<EMAIL> #
# #
# License: MIT License #
# Copyright (C) 2021.11.28 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
# ********************************************************************************** #
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import random
import glob
import re
import os
import seaborn as sns
from matplotlib import cm, colors
from matplotlib.ticker import MaxNLocator
from pandas.api.types import is_numeric_dtype
# Function, .................................................................
def show_df_exaples(df, var_names, n=3 ):
'''
returns dtype and examples of requested variables in the input df
df : pandas dataframe
var_names : list
n : int, number of examples provided with each variable
CAUTION: the .loc function used to subset the dataframe
does not accept column names that are missing from the dataframe
example
>>> res_list = show_df_exaples(df=X_model, var_names=test_variables)
>>> pd.DataFrame(res_list)
# check if class_nr is correct
'''
# test and work on copy
assert type(df)==pd.core.frame.DataFrame, "df dtype error, should be a pandas dataframe"
df = df.copy()
# collect dtpye and exaples
res_list =[]
for var_name in var_names:
try:
# subset series
s = df.loc[:,var_name]
# count unique values
counted_values = s.value_counts(ascending=False)
# basic info on that feature
one_var_res_dct = {
"name":var_name,
"dtype": s.dtype,
"class_nr":counted_values.shape[0],
# ..
"instances": s.shape[0],
"na": s.isnull().sum()
}
# count unique values
counted_values = s.value_counts(ascending=False)
# get n, most frequent variables, or less
for i in range(n):
if i+1 <= counted_values.shape[0]:
one_var_res_dct[f"eg{i+1}_value"]=counted_values.index.values.tolist()[i]
one_var_res_dct[f"eg{i+1}_counts"]=f"{counted_values.iloc[i]}; ({np.round(counted_values.iloc[i]/s.shape[0]*100,1)}%)"
else:
one_var_res_dct[f"eg{i+1}_value"]=None
one_var_res_dct[f"eg{i+1}_counts"]=None
except:
# add info that a given feature was not found
one_var_res_dct = {
"name":var_name,
"dtype": "NOT FOUND",
"class_nr":None,
# ..
"instances": None,
"na": None
}
for i in range(n):
one_var_res_dct[f"eg{i+1}_value"]=None
one_var_res_dct[f"eg{i+1}_counts"]=None
# add to results list
res_list.append(one_var_res_dct)
return pd.DataFrame(res_list)
# Function, .................................................................
def show_unlisted_variables(df, var_list, n=2, verbose=False):
'''
prints variables that were not defined in lists/dct in df, or were not present in df,
provides examples of each variable with the show_df_exaples() function,
parameters:
- df : pandas dataframe
- var_list : list or dict of lists, that will be concatenated into one list
- n : how many examples of each variable to present,
returns:
- defined_var_examples, undefined_var_examples, unknownw_var_examples: all dataframes
example:
>>> defined_var_examples, undefined_var_examples, unknownw_var_examples = show_unlisted_variables(
... df = X_model,
... var_list = {"level_indicator_variables":level_indicator_variables,
... "numeric_variables": numeric_variables,
... "one_hot_encoded_variables": one_hot_encoded_variables
... },
... verbose=True
... )
'''
# test and work on copy
assert type(df)==pd.core.frame.DataFrame, "df dtype error, should be a pandas dataframe"
df = df.copy()
# get var names from input df
df_var_list = df.columns.values.tolist().copy()
# ...
# list with pre-defined variables & provided_var_names
if isinstance(var_list, list):
defined_var_list = var_list.copy()
else:
defined_var_list =[]
for k,v in var_list.items():
defined_var_list.extend(v.copy())
provided_var_names = defined_var_list.copy() # for reporting and the loop, no changes
# ...
# find undefined variables in df
unknownw_var_list = [] # not found in df
undefined_var_list = df_var_list.copy() # we will remove all defined variables from that list
for var_name in provided_var_names:
search_res = (pd.Series(df_var_list)
import pandas as pd
import numpy as np
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
def getBatteryCapacity(Battery):
cycle = []
capacity = []
i = 1
# print(len(Battery))
for Bat in Battery:
if Bat['cycle'] == 'discharge':
cycle.append(i)
capacity.append(Bat['data']['Capacity'][0])
i += 1
# print(i)
return [cycle, capacity]
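# Minimal sketch of the expected input structure (assumed from the accesses above,
# not taken from the original data loader): a list of cycle dicts with a 'cycle'
# type and a 'data' dict that holds a 'Capacity' list for discharge cycles.
_demo_battery = [
{'cycle': 'charge', 'data': {'Capacity': []}},
{'cycle': 'discharge', 'data': {'Capacity': [1.85]}},
{'cycle': 'discharge', 'data': {'Capacity': [1.83]}},
]
_demo_cycles, _demo_caps = getBatteryCapacity(_demo_battery)
# _demo_cycles -> [1, 2], _demo_caps -> [1.85, 1.83]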
def getChargingValues(Battery, Index):
Battery = Battery[Index]['data']
index = []
i = 1
for iterator in Battery['Voltage_measured']:
index.append(i)
i += 1
return [index, Battery['Voltage_measured'], Battery['Current_measured'], Battery['Temperature_measured'], Battery['Voltage_charge'], Battery['Time']]
def getDischargingValues(Battery, Index):
Battery = Battery[Index]['data']
index = []
i = 1
for iterator in Battery['Voltage_measured']:
index.append(i)
i += 1
return [index, Battery['Voltage_measured'], Battery['Current_measured'], Battery['Temperature_measured'], Battery['Voltage_load'], Battery['Time']]
def getMaxDischargeTemp(Battery):
cycle = []
temp = []
i = 1
for Bat in Battery:
if Bat['cycle'] == 'discharge':
cycle.append(i)
temp.append(max(Bat['data']['Temperature_measured']))
i += 1
return [cycle, temp]
def getMaxChargeTemp(Battery, discharge_len):
cycle = []
temp = []
i = 1
for Bat in Battery:
if Bat['cycle'] == 'charge':
cycle.append(i)
temp.append(max(Bat['data']['Temperature_measured']))
i += 1
return [cycle[:discharge_len], temp[:discharge_len]]
def getDataframe(Battery):
l = getBatteryCapacity(Battery)
l1 = getMaxDischargeTemp(Battery)
l2 = getMaxChargeTemp(Battery, len(l1[0]))
data = {'cycle':l[0],'capacity':l[1], 'max_discharge_temp':l1[1], 'max_charge_temp':l2[1]}
return pd.DataFrame(data)
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
agg = pd.concat(cols, axis=1)
agg.columns = names
if dropnan:
agg.dropna(inplace=True)
return agg
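# Illustrative example (hypothetical values): with n_in=2 and n_out=1 a single
# series is reframed into lag columns var1(t-2), var1(t-1) and the target var1(t);
# the first n_in rows are dropped because their lags are undefined.
_demo_framed = series_to_supervised([10, 20, 30, 40], n_in=2, n_out=1)
# _demo_framed row at index 2 -> var1(t-2)=10, var1(t-1)=20, var1(t)=30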
def supervisedDataframeBuilder(Batterydataframe, scaler):
values = Batterydataframe[['capacity']]
scaled = scaler.fit_transform(values)
data = series_to_supervised(scaled, 5, 1)
data['cycle'] = data.index
return data
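# Usage sketch (hypothetical capacity values; only the 'capacity' column is read
# by supervisedDataframeBuilder): scale to [0, 1], then reframe with 5 lags.
_demo_caps_df = pd.DataFrame({'capacity': [1.85, 1.84, 1.83, 1.82, 1.81, 1.80, 1.79]})
_demo_supervised = supervisedDataframeBuilder(_demo_caps_df, MinMaxScaler(feature_range=(0, 1)))
# columns: var1(t-5) ... var1(t-1), var1(t), plus 'cycle' taken from the index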
def splitDataFrame(Dataframe, ratio):
X = Dataframe[['cycle', 'var1(t-5)', 'var1(t-4)', 'var1(t-3)', 'var1(t-2)', 'var1(t-1)']]
Y = Dataframe[['var1(t)']]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = ratio, shuffle=False)
return X_train, X_test, y_train, y_test
def moving_average(data, window_size):
window = np.ones(int(window_size))/float(window_size)
return np.convolve(data, window, 'same')
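# Quick illustration: an odd window keeps the series length, with boundary values
# averaged over a partial window (edge effect of the 'same' convolution).
_demo_smoothed = moving_average([1, 2, 3, 4, 5], 3)
# _demo_smoothed -> array([1., 2., 3., 4., 3.])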
def rollingAverage(x_stuff, y_stuff):
window_size = 10
sigma=1.0
avg = moving_average(y_stuff, window_size)
avg_list = avg.tolist()
residual = y_stuff - avg
testing_std = residual.rolling(window_size).std()
testing_std_as_df = pd.DataFrame(testing_std)
import common_python.constants as cn
from common_python.testing import helpers
from common_python.classifier import feature_analyzer
from common_python.classifier.feature_analyzer import FeatureAnalyzer
from common_python.tests.classifier import helpers as test_helpers
import copy
import os
import pandas as pd
import numpy as np
from sklearn import svm
import shutil
import time
import unittest
IGNORE_TEST = False
IS_SCALE = False # Do scale tests
IS_REPORT = False
IS_PLOT = False
CLASS = 1
DF_X, SER_Y_ALL = test_helpers.getDataLong()
CLASSES = list(SER_Y_ALL.unique())
# Make binary classes (for CLASS)
SER_Y = pd.Series([
cn.PCLASS if v == CLASS else cn.NCLASS
for v in SER_Y_ALL], index=SER_Y_ALL.index)
NUM_CROSS_ITER = 5
NUM_CROSS_ITER_ACCURATE = 50
CLF = svm.LinearSVC()
FEATURE1 = "Rv0158"
FEATURE2 = "Rv1460"
FEATURES = [FEATURE1, FEATURE2]
# Number of features used for scaling runs
NUM_FEATURE_SCALE = 100
# File paths for tests
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_PERSISTER_PATH = os.path.join(TEST_DIR,
"persister.pcl")
TMP_DIR_DCT = {CLASS:
os.path.join(TEST_DIR, "tmp_feature_analyzer_%d") % CLASS}
TEST_SER_PATH = os.path.join(TEST_DIR, "ser.csv")
# Existing data
TEST_DIR_PATH = os.path.join(TEST_DIR,
"test_feature_analyzer_%d" % CLASS)
TEST_DIR_PATH_DCT = {CLASS: TEST_DIR_PATH}
ANALYZER = test_helpers.getFeatureAnalyzer()
ANALYZER_DCT = {test_helpers.CLASS: ANALYZER}
class TestFeatureAnalyzer(unittest.TestCase):
def _init(self):
self.df_X = copy.deepcopy(DF_X[FEATURES])
self.ser_y = copy.deepcopy(SER_Y)
self.clf = copy.deepcopy(CLF)
self.analyzer = feature_analyzer.FeatureAnalyzer(
self.clf, self.df_X, self.ser_y,
is_report=IS_REPORT,
num_cross_iter=NUM_CROSS_ITER_ACCURATE)
self.analyzer_dct = ANALYZER_DCT
def _remove(self):
paths = [TEST_SER_PATH, TEST_PERSISTER_PATH]
paths.extend(list(TMP_DIR_DCT.values()))
for path in paths:
if os.path.isdir(path):
shutil.rmtree(path)
if os.path.isfile(path):
os.remove(path)
def setUp(self):
if IGNORE_TEST:
return
self._remove()
self._init()
def tearDown(self):
self._remove()
def testConstructor(self):
if IGNORE_TEST:
return
self.assertEqual(len(self.analyzer._partitions),
NUM_CROSS_ITER_ACCURATE)
def _report(self, method, start):
print("\n*** Ran %s in %4.2f seconds" %
(method, time.time() - start))
def test_ser_sfa_scale(self):
if IGNORE_TEST:
return
if not IS_SCALE:
return
df_X = self._makeDFX(NUM_FEATURE_SCALE)
start = time.time()
analyzer = feature_analyzer.FeatureAnalyzer(
self.clf, df_X, self.ser_y,
num_cross_iter=NUM_CROSS_ITER)
_ = analyzer.ser_sfa
self._report("test_ser_sfa_scale", start)
def test_ser_sfa(self):
if IGNORE_TEST:
return
ser = self.analyzer.ser_sfa
trues = [isinstance(v, float) for v in ser.values]
self.assertTrue(all(trues))
def _makeDFX(self, num_cols):
features = list(DF_X.columns.tolist())
features = features[:num_cols]
return DF_X[features].copy()
def test_df_cpc(self):
if IGNORE_TEST:
return
df = self.analyzer.df_cpc
self.assertEqual(df.loc[FEATURE1, FEATURE1], 0)
self.assertTrue(np.isclose(
df.loc[FEATURE2, FEATURE2], 1))
self.assertTrue(helpers.isValidDataFrame(df,
[FEATURE1, FEATURE2]))
def test_df_cpc_scale(self):
if IGNORE_TEST:
return
if not IS_SCALE:
return
num_cols = int(np.sqrt(NUM_FEATURE_SCALE))
df_X = self._makeDFX(num_cols)
analyzer = feature_analyzer.FeatureAnalyzer(
self.clf, df_X, self.ser_y,
num_cross_iter=NUM_CROSS_ITER)
start = time.time()
_ = analyzer.df_cpc
self._report("test_df_cpc_scale", start)
def test_df_ipa(self):
if IGNORE_TEST:
return
df = self.analyzer.df_ipa
self.assertTrue(helpers.isValidDataFrame(df,
[FEATURE1, FEATURE2]))
trues = [isinstance(v, float) for v in
np.reshape(df.values, len(df)*len(df.columns))]
self.assertTrue(all(trues))
def test_df_ipa_scale(self):
if IGNORE_TEST:
return
if not IS_SCALE:
return
num_cols = int(np.sqrt(NUM_FEATURE_SCALE))
df_X = self._makeDFX(num_cols)
analyzer = feature_analyzer.FeatureAnalyzer(
self.clf, df_X, self.ser_y,
num_cross_iter=NUM_CROSS_ITER)
start = time.time()
_ = analyzer.df_ipa
self._report("test_df_ipa_scale", start)
def testReportProgress(self):
if IGNORE_TEST:
return
self.analyzer._reportProgress(
feature_analyzer.SFA, 0, 10)
self.analyzer._reportProgress(
feature_analyzer.SFA, 11, 10)
#
INTERVAL = 5
analyzer = feature_analyzer.FeatureAnalyzer(
self.clf, self.df_X, self.ser_y,
is_report=IS_REPORT,
report_interval = INTERVAL,
num_cross_iter=NUM_CROSS_ITER_ACCURATE)
analyzer._reportProgress(
feature_analyzer.SFA, 0, 10)
analyzer._reportProgress(
feature_analyzer.SFA, INTERVAL + 1, 10)
def testPlotSFA(self):
if IGNORE_TEST:
return
self._init()
analyzers = list(self.analyzer_dct.values()) * 6
feature_analyzer.plotSFA(analyzers, is_plot=IS_PLOT)
def testPlotCPC(self):
if IGNORE_TEST:
return
self.analyzer_dct[CLASS].plotCPC(is_plot=IS_PLOT)
criteria = lambda v: v < 0.5
self.analyzer_dct[CLASS].plotCPC(is_plot=IS_PLOT,
criteria=criteria)
def testPlotIPA(self):
if IGNORE_TEST:
return
self.analyzer_dct[CLASS].plotIPA(is_plot=IS_PLOT)
def testGetPath(self):
dir_path = TMP_DIR_DCT[CLASS]
path = FeatureAnalyzer._getPath(dir_path,
feature_analyzer.CPC)
self.assertTrue("csv" in path)
def testMakeSer(self):
if IGNORE_TEST:
return
index = ['a', 'b', 'c']
ser = pd.Series(range(len(index)), index=index)
ser.to_csv(TEST_SER_PATH)
df = pd.read_csv(TEST_SER_PATH)
ser_new = FeatureAnalyzer._makeSer(df, is_sort=False)
self.assertTrue(ser.equals(ser_new))
def _equals(self, analyzer1, analyzer2):
for metric in feature_analyzer.METRICS:
value1 = analyzer1.getMetric(metric)
value2 = analyzer2.getMetric(metric)
self.assertTrue(all(value1.eq(value2)))
def testSerializeAndDeserialize(self):
if IGNORE_TEST:
return
dir_path = TMP_DIR_DCT[CLASS]
self.analyzer.serialize(dir_path,
persister_path=TEST_PERSISTER_PATH)
for name in feature_analyzer.VARIABLES:
path = FeatureAnalyzer._getPath(dir_path, name)
self.assertTrue(os.path.isfile(path))
#
analyzer = feature_analyzer.FeatureAnalyzer.deserialize(
dir_path)
self._equals(self.analyzer, analyzer)
for metric in feature_analyzer.METRICS:
m_old = self.analyzer.getMetric(metric)
m_new = analyzer.getMetric(metric)
self.assertTrue(all(m_old.eq(m_new)))
def testSerializeWithPersister(self):
if IGNORE_TEST:
return
# Serialize the existing data
dir_path = TMP_DIR_DCT[CLASS]
self.analyzer.serialize(dir_path,
persister_path=TEST_PERSISTER_PATH)
# Create a new analyzer with no data
analyzer = FeatureAnalyzer(None,
pd.DataFrame(), pd.Series()
import docx
from docx.shared import Pt
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_BREAK
from docx.shared import Cm
import os
import math
import pandas as pd
import numpy as np
import re
from datetime import date
import streamlit as st
import json
import glob
from PIL import Image
import smtplib
import docx2pdf
import shutil
import zipfile
from datetime import datetime
import platform
import matplotlib.pyplot as plt
def User_validation():
f=open("Validation/Validation.json","r")
past=json.loads(f.read())
f.close()
now=datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M")
time_past=datetime.strptime(past['Acceso']["Hora"], "%d/%m/%Y %H:%M")
timesince = now - time_past
Time_min= int(timesince.total_seconds() / 60)
bool_negate = Time_min<120
if not bool_negate:
past['Acceso'].update({"Estado":"Negado"})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
bool_aprove= past['Acceso']["Estado"]=="Aprovado"
if not bool_aprove:
colums= st.columns([1,2,1])
with colums[1]:
#st.image("Imagenes/Escudo_unal.png")
st.subheader("Ingrese el usuario y contraseña")
Usuario=st.text_input("Usuario")
Clave=st.text_input("Contraseña",type="password")
Users=["Gestor Comercial"]
bool_user = Usuario in Users
bool_clave = (Clave)==("1234")
bool_user_email = past['Acceso']["User"] == Usuario
bool_time2 = Time_min<1000
bool_1 = bool_time2 and bool_user_email
bool_2 = bool_user and bool_clave
if not bool_user_email and bool_2:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
if not bool_2:
if (Usuario != "") and (Clave!=""):
with colums[1]:
st.warning("Usuario o contraseña incorrectos.\n\n Por favor intente nuevamente.")
elif bool_2 and not bool_1:
past['Acceso'].update({"User":Usuario,"Estado":"Aprovado","Hora":dt_string})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
EMAIL_ADDRESS = '<EMAIL>'
EMAIL_PASSWORD = '<PASSWORD>'
try:
with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'Acceso aplicacion Julia'
body = 'Acceso usuario ' + Usuario +' el '+dt_string
msg = f'Subject: {subject}\n\n{body}'
smtp.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, msg)
except:
pass
with colums[1]:
st.button("Acceder a la aplicación")
elif bool_2:
past['Acceso'].update({"Estado":"Aprovado","Hora":dt_string,"User":Usuario})
str_json_0=json.dumps(past, indent=4)
J_f=open("Validation/Validation.json","w")
J_f.write(str_json_0)
J_f.close()
with colums[1]:
st.button("Acceder a la aplicación")
return bool_aprove
def Num_dias(leng):
if leng==1:
return "1 día"
else:
return str(leng) + " días"
def day_week(dia):
if dia ==0:
Dia="Lunes"
elif dia ==1:
Dia="Martes"
elif dia ==2:
Dia="Miércoles"
elif dia ==3:
Dia="Jueves"
elif dia ==4:
Dia="Viernes"
elif dia ==5:
Dia="Sábado"
elif dia ==6:
Dia="Domingo-Festivo"
return Dia
def remove_row(table, row):
tbl = table._tbl
tr = row._tr
tbl.remove(tr)
def Range_fecha(dates):
if len(dates)==1:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')
else:
return pd.to_datetime(dates[0]).strftime('%Y-%m-%d')+" hasta "+ pd.to_datetime(dates[-1]).strftime('%Y-%m-%d')
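# Illustrative example (hypothetical dates); a single date returns just that date.
_demo_rango = Range_fecha([pd.Timestamp('2021-03-01'), pd.Timestamp('2021-03-05')])
# _demo_rango -> '2021-03-01 hasta 2021-03-05'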
def any2str(obj):
if isinstance(obj, str):
return obj
elif math.isnan(obj):
return ""
elif isinstance(obj, int):
return str(obj)
elif isinstance(obj, float):
return str(obj)
def dt_fechas(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["Fecha"]== dia]
data_dia_todos=data[data["Fecha"]==dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_2(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Requerimiento","Respaldo"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
data_dia_todos=data[data["FECHA"]==dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
df=pd.DataFrame([[d_week,dia,data_dia_todos["CANTIDAD"].sum(),data_fecha["CANTIDAD"].sum()]],columns=["Dia","Fecha","Requerimiento","Respaldo"])
dt_Final=dt_Final.append(df, ignore_index=True)
return dt_Final
def dt_fechas_3(data,data_user,Fechas,tipo_dia):
dt_Final=pd.DataFrame(columns=["Dia","Fecha","Respaldo","P_neto","TRM","PRECIO PONDERADO"])
for dia in Fechas:
data_fecha=data_user[data_user["FECHA"]== dia]
try:
d_week=tipo_dia[tipo_dia["FECHA"]==dia]["TIPO D"].to_numpy()[0]
except:
st.warning("Actualizar el calendario del excel extra")
d_week=day_week(pd.Series(data=dia).dt.dayofweek.to_numpy()[0])
from __future__ import print_function
from __future__ import division
# load libraries
from builtins import str
from builtins import range
from past.utils import old_div
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import geopandas as gpd
import sys
import os
from matplotlib.collections import PatchCollection
from descartes import PolygonPatch
import shapely
import geopy
import argparse
import io
import unicodedata
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import glob
import pickle
from os.path import expanduser
import pdb
import unidecode
import importlib
importlib.reload(sys)
#sys.setdefaultencoding('utf-8')
#####################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
################################################
def string_2_bool(string):
if string in ['true', 'TRUE' , 'True' , '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh']:
return True
else:
return False
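# Illustrative check: matching is case-sensitive and only recognises the spellings
# listed above, so 'True' and 'yes' pass but 'Yes' and '0' do not.
_demo_flags = [string_2_bool(s) for s in ('True', 'yes', 'Yes', '0')]
# _demo_flags -> [True, True, False, False]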
################################################
def saveplot(appellations_domain,vin,metropole,bassins):
#to set extent
appellations_domain = gpd.GeoDataFrame(pd.DataFrame(appellations_domain), crs=appellations.crs)
if len(appellations_domain)>0:
xmin,ymin, xmax,ymax = appellations_domain.total_bounds
elif len(bassins.loc[bassins.nom == vin.Bassin]) == 0:
xmin,ymin,xmax,ymax = 0, 0, 0, 0
else:
xmin,ymin,xmax,ymax = bassins.loc[bassins.nom == vin.Bassin].total_bounds
#vin.geometry.coords.xy[0][0]-1.e5, vin.geometry.coords.xy[0][0]+1.e5,\
#vin.geometry.coords.xy[1][0]-1.e5, vin.geometry.coords.xy[1][0]+1.e5
if vin.Bassin == 'Bourgogne': buffer_lim = 100.e3
elif u'domainedelamordor' in vin.DomaineChateau.replace('&','').replace(' ','').lower() : buffer_lim = 50.e3
else: buffer_lim = 200.e3
'''
for side in ax.spines.keys(): # 'top', 'bottom', 'left', 'right'
ax.spines[side].set_linewidth(1)
for side in bx.spines.keys(): # 'top', 'bottom', 'left', 'right'
bx.spines[side].set_linewidth(1)
'''
xx = xmax-xmin; yy = ymax-ymin
xc = xmin+.5*xx; yc = ymin+.5*yy
dd = max([xx,yy,buffer_lim])
xmin = xc-.5*dd; xmax = xc+.5*dd
ymin = yc-.5*dd; ymax = yc+.5*dd
bufferZone_map = 10.e3
ax.set_xlim(xmin-bufferZone_map, xmax+bufferZone_map)
ax.set_ylim(ymin-bufferZone_map, ymax+bufferZone_map)
#ax.legend(handles = LegendElement_domain, loc='upper right', labelspacing=1.1, handlelength=2.5, handleheight=1.9)
ax.legend(handles = LegendElement_domain, labelspacing=.7, handlelength=2.5, handleheight=1.9, ncol=int(vin['legend_nbreColumn']), loc=vin['legend_loc'],
fancybox=True, framealpha=0.9 )
# for multiple column
# probably need to add column in excel file to tell where to put the legend and the number of columns
#https://stackoverflow.com/questions/42103144/how-to-align-rows-in-matplotlib-legend-with-2-columns
# frame of left plot is not good. try better framing
#chateauAmpuis is doing weird stuff on the legend
# find a solution for alsace appellations
#add igp
if vin.Pays == 'France':
#add prefecture
if vin.DomaineChateau == u'Ch\xe2teau La Borie':
selectedCommune = ['Nice', 'Marseille', 'Montpellier', 'Avignon', 'Gap', 'Saint-\xc3\x89tienne', 'Valence', 'Bastia', 'Ajaccio']
else:
selectedCommune = prefectures.Commune.to_list()
prefectures.loc[prefectures.Commune.isin(selectedCommune)].plot(ax=ax, color='k', markersize=20,)
prefectures.loc[prefectures.Commune.isin(selectedCommune)].apply(lambda x: ax.annotate(text=str(x.Commune),\
xy=[x.geometry.centroid.x + x.add_to_name_position.coords.xy[0][0],\
x.geometry.centroid.y + x.add_to_name_position.coords.xy[1][0] ], ha=x.LabelLoc_ha,va=x.LabelLoc_va,zorder=5),axis=1);
try:
minx, miny, maxx, maxy = metropole.geometry.total_bounds
xx = maxx-minx; yy = maxy-miny
xc = minx+.5*xx; yc = miny+.5*yy
dd = max([xx,yy])
minx = xc-.5*dd; maxx = xc+.5*dd
miny = yc-.5*dd; maxy = yc+.5*dd
except:
minx = 0; maxx = 0
miny = 0; maxy = 0
buffer_lim = 50.e3
bx.set_xlim(minx-buffer_lim,maxx+buffer_lim)
bx.set_ylim(miny-buffer_lim,maxy+buffer_lim)
# add image location and France map
rect = mpatches.Rectangle((xmin-bufferZone_map,ymin-bufferZone_map),(xmax-xmin)+2*bufferZone_map,\
(ymax-ymin)+2*bufferZone_map, linewidth=1, edgecolor='k', facecolor='none')
bx.add_patch(rect)
fig.savefig(map_domain, dpi=dpi_map, facecolor=fig.get_facecolor())
plt.close(fig)
def simple_appelation(appelation):
if appelation == 'Alsace Gewurztraminer': return 'Alsace'
if appelation == 'Alsace Riesling' : return 'Alsace'
if appelation == 'Alsace Pinot Noir' : return 'Alsace'
return appelation
##########################
if __name__ == '__main__':
##########################
parser = argparse.ArgumentParser(description='draw wine map')
parser.add_argument('-s','--flag_start', help='False if input files needed to be reloaded',required=False)
parser.add_argument('-v','--flag_vin', help='True if only wine list need to be reloaded',required=False)
parser.add_argument('-b','--flag_border', help='True if reload border shapefile',required=False)
args = parser.parse_args()
home = expanduser("~")
# where are the input data
dir_in = home+'/Dropbox/CarteVin/'
# where to generate vin.tex
dir_out = './'
wkdir = dir_out + 'VinData/'
ensure_dir(wkdir)
#listDesVins
file_listDesVins = home+'/Dropbox/CarteVin/MaCave/ListeDesVins.xlsx'
dpi_largePlot = 100
dpi_map = 100
#define Input
if args.flag_start is None:
flag_restart = True
else:
flag_restart = string_2_bool(args.flag_start)
if args.flag_vin is None:
flag_vin = False
else:
flag_vin = string_2_bool(args.flag_vin)
if args.flag_border is None:
flag_border = False
else:
flag_border = string_2_bool(args.flag_border)
######################
# france border contour
######################
if ((flag_border) or (not(os.path.isfile(wkdir+"metropole.shp")))):
print('france border contour ...')
fp = dir_in+'communes-20150101-5m-shp/communes-20150101-5m.shp'
map_dfCommune = gpd.read_file(fp)
map_dfCommune = map_dfCommune.rename(columns={'nom':'Commune'})
map_dfCommune['metropole'] = 0
map_dfCommune.loc[ pd.to_numeric(map_dfCommune['insee'], errors='coerce') < 97000, 'metropole'] = 1
map_dfCommune.loc[map_dfCommune['insee'].str.contains('2A'), 'metropole'] = 1
map_dfCommune.loc[map_dfCommune['insee'].str.contains('2B'), 'metropole'] = 1
map_dfCommune.to_file(wkdir+"map_df_communes.shp")
metropole_geometry = map_dfCommune[['metropole','geometry']].dissolve(by='metropole')
metropole = gpd.GeoDataFrame(pd.DataFrame({'countryMainLand': ['France,']}),geometry=[metropole_geometry.geometry[1]],crs=metropole_geometry.crs)
metropole = metropole.to_crs(epsg=3395)
metropole.to_file(wkdir+"metropole.shp")
else:
metropole = gpd.read_file(wkdir+"metropole.shp")
map_dfCommune = gpd.read_file(wkdir+"map_df_communes.shp")
######################
#load appelation
######################
listAppellations = pd.read_csv(dir_in+'liste-AOC-vins-wikipedia.csv')
listAppellations['Appellation'] = listAppellations['Appellation'].str.strip()
listAppellations['Bassin'] = listAppellations['Bassin'].str.strip()
######################
#load bassins color code
######################
listBassinColor = pd.read_csv(dir_in+'bassins-colors.csv')
listBassinColor['Bassin'] = [str(xx) for xx in listBassinColor['Bassin']]
######################
#load vines list
######################
if ((flag_vin) or (not(flag_restart)) or (not(os.path.isfile(wkdir+"listVins.gpkg")))):
print('list vins de la cave ...')
print('le fichier est ici : ', file_listDesVins)
vins_ = pd.read_excel(file_listDesVins, sheet_name='france - Liste des vins', header=2)
vins_ = vins_.loc[ (vins_['Couleur'].str.strip()=='Blanc') |
(vins_['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
(vins_['Couleur'].str.strip()=='Rouge') |
(vins_['Couleur'].str.strip()==u'Ros\xe9') |
(vins_['Couleur'].str.strip()==u'Pommeau')
]
vins_2_ = pd.read_excel(file_listDesVins, sheet_name='international - Liste des vins ', header=1)
vins_2_ = vins_2_.loc[ (vins_2_['Couleur'].str.strip()=='Blanc') |
(vins_2_['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
(vins_2_['Couleur'].str.strip()=='Rouge') |
(vins_2_['Couleur'].str.strip()==u'Ros\xe9') |
(vins_2_['Couleur'].str.strip()==u'Pommeau')
]
cidres_ = pd.read_excel(file_listDesVins, sheet_name='cidre - Liste des Cidres', header=1)
cidres_.index = list(range(len(vins_),len(vins_)+len(cidres_)))
listVins = pd.concat([ vins_, cidres_, vins_2_ ], sort=True,
ignore_index=True)
#listVins = pd.concat([ vins_2_ ], sort=True)
#clean data
listVins = listVins.loc[ (listVins['Couleur'].str.strip()=='Blanc') |
(listVins['Couleur'].str.strip()==u'Blanc p\xe9tillant') |
(listVins['Couleur'].str.strip()=='Rouge') |
(listVins['Couleur'].str.strip()==u'Ros\xe9') |
(listVins['Couleur'].str.strip()==u'Cidre') |
(listVins['Couleur'].str.strip()==u'Pommeau')
]
geocoder = geopy.geocoders.BANFrance()
geocoder_bing = None
cave = geocoder.geocode('4 rue Coat Tanguy 29890 Brignogan-Plages')
listVins['latlong'] = [cave.point]*listVins.shape[0]
for index, row in listVins.iterrows():
address1 = row['Adresse'].split(' ')
address2 = []
for address_ in address1:
tmp_ = address_.rstrip(',').rstrip(' ')
if tmp_ != '':
address2.append(tmp_)
address3 = '{:s} {:05.0f} {:s} {:s}'.format( ' '.join(address2[:-1]), row['Code postal'], address2[-1], row['Pays'])
try:
if row['Pays'] == 'France':
listVins.at[index,'latlong'] = geocoder.geocode(address3,timeout=3).point
else:
if geocoder_bing == None:
key_bing = '<KEY>'
geocoder_bing = geopy.geocoders.Bing(key_bing)
listVins.at[index,'latlong'] = geocoder_bing.geocode(address3,timeout=3).point
except geopy.exc.GeocoderTimedOut :
print('geopy timeout on :', address3)
sys.exit()
#print address3, ' | ', listVins.at[index,'latlong']
lats = [pt.latitude for pt in listVins['latlong']]
lons = [pt.longitude for pt in listVins['latlong']]
listVins = gpd.GeoDataFrame( listVins.loc[:,listVins.columns!='latlong'] ,geometry= gpd.points_from_xy(x=lons,y=lats), crs={'init': 'epsg:4326'})
#listVins = gpd.GeoDataFrame( listVins.loc[:,:] ,geometry= gpd.points_from_xy(x=lons,y=lats), crs={'init': 'epsg:4326'})
listVins = listVins.to_crs(epsg=3395)
listVins['DomaineChateau'] = [ str(xx) for xx in listVins['DomaineChateau'] ]
listVins['Pays_order'] = listVins['Pays'].str.replace('France','AAA')
#load local legend info
legendParam = pd.read_csv(dir_in+'domaineChateau_legend_location.csv')
legendParam['DomaineChateau'] = [ str(xx) for xx in legendParam['DomaineChateau'] ]
listVins = pd.merge(listVins, legendParam, how='left', on='DomaineChateau')
listVins.loc[listVins['legend_nbreColumn'].isnull(),'legend_nbreColumn'] = np.int64(1)
listVins.loc[listVins['legend_loc'].isnull(),'legend_loc'] = 'upper right'
print('{:d} vins ont ete charge'.format(listVins.shape[0]))
listVins.to_file(wkdir+"listVins.gpkg", driver="GPKG")
else:
listVins = gpd.read_file(wkdir+"listVins.gpkg", driver="GPKG")
####################
# neighbourg borders
####################
#data from https://wambachers-osm.website/boundaries/
coasts_borders = gpd.read_file(dir_in+'Borders/neighbourgCountries.shp')
coasts_borders = coasts_borders.to_crs(epsg=3395)
####################
# river and lake
####################
clc12 = gpd.read_file(dir_in+'CLC12/CLC12_FR_RGF_SHP/CLC12_FR_RGF.shp')
inlandWater = clc12.loc[\
#(clc12['CODE_12']=='511')|(clc12['CODE_12']=='522')\
#|(clc12['CODE_12']=='411')\
(clc12['CODE_12']=='521')]
inlandWater_river = gpd.read_file(dir_in+'Wise_WaterData/EuropeanRiver.shp') # for large plot
inlandWater_lake = clc12.loc[(clc12['CODE_12']=='512')]
inlandWater_lake = inlandWater_lake.loc[inlandWater_lake.geometry.area>.5e6]
seaWater = clc12.loc[(clc12['CODE_12']=='423')|(clc12['CODE_12']=='523')\
|(clc12['CODE_12']=='421')|(clc12['CODE_12']=='331')]
inlandWater = inlandWater.to_crs(epsg=3395)
seaWater = seaWater.to_crs(epsg=3395)
inlandWater_river = inlandWater_river.to_crs(epsg=3395)
rivers_hydroFrance = gpd.read_file(dir_in+'ROUTE120_1-1_SHP_LAMB93_000_2012-11-26/ROUTE120/1_DONNEES_LIVRAISON_2012-11-00377/R120_1-1_SHP_LAMB93_FR-ED121/HYDROGRAPHIE/TRONCON_HYDROGRAPHIQUE.SHP')
rivers_hydroFrance = rivers_hydroFrance.to_crs(epsg=3395)
country_pakage = {}
paysCode = {};
paysCode['Hongrie'] = 'HUN'
paysCode['France'] = 'FRA'
pays = 'France'
country_pakage[pays] = {}
country_pakage[pays]['inlandWater'] = inlandWater
country_pakage[pays]['inlandWater_river'] = rivers_hydroFrance
country_pakage[pays]['inlandWater_lake'] = inlandWater_lake
country_pakage[pays]['seaWater'] = seaWater
country_pakage[pays]['coasts_borders'] = coasts_borders
country_pakage[pays]['metropole'] = metropole
######################
# border contour international
######################
for pays in listVins['Pays']:
if pays == 'France': continue
country_pakage[pays] = {}
if pays in [u'Nouvelle Z\xe9lande ', u'Italie']:
empty_ = gpd.GeoDataFrame([], crs="EPSG:4326")
country_pakage[pays]['inlandWater'] = empty_
country_pakage[pays]['inlandWater_river'] = empty_
country_pakage[pays]['inlandWater_lake'] = empty_
country_pakage[pays]['seaWater'] = empty_
country_pakage[pays]['coasts_borders'] = empty_
country_pakage[pays]['metropole'] = empty_
continue
clc12_ = gpd.read_file(dir_in+'CLC12/CLC12_{:s}/CLC12_{:s}.shp'.format(paysCode[pays],paysCode[pays]))
inlandWater_ = clc12_.loc[(clc12_['Code_12']=='521')]
inlandWater_river_ = gpd.read_file(dir_in+'river/{:s}/{:s}_water_lines_dcw.shp'.format(paysCode[pays],paysCode[pays]))
inlandWater_lake_ = gpd.read_file(dir_in+'river/{:s}/{:s}_water_areas_dcw.shp'.format(paysCode[pays],paysCode[pays]))
#inlandWater_lake_ = clc12_.loc[(clc12_['Code_12']=='512')]
#inlandWater_lake_ = inlandWater_lake_.loc[inlandWater_lake_.geometry.area>.5e6]
seaWater_ = clc12_.loc[(clc12_['Code_12']=='423')|(clc12_['Code_12']=='523')\
|(clc12_['Code_12']=='421')|(clc12_['Code_12']=='331')]
inlandWater_ = inlandWater_.to_crs(epsg=3395)
seaWater_= seaWater_.to_crs(epsg=3395)
inlandWater_river_ = inlandWater_river_.to_crs(epsg=3395)
inlandWater_lake_ = inlandWater_lake_.to_crs(epsg=3395)
coasts_borders_ = gpd.read_file(dir_in+'Borders/International/{:s}/neighbourgCountries.geojson'.format(pays))
coasts_borders_ = coasts_borders_.to_crs(epsg=3395)
metropole_ = gpd.read_file(dir_in+'Borders/International/{:s}/metropole.geojson'.format(pays))
metropole_ = metropole_.to_crs(epsg=3395)
country_pakage[pays]['inlandWater'] = inlandWater_
country_pakage[pays]['inlandWater_river'] = inlandWater_river_
country_pakage[pays]['inlandWater_lake'] = inlandWater_lake_
country_pakage[pays]['seaWater'] = seaWater_
country_pakage[pays]['coasts_borders'] = coasts_borders_
country_pakage[pays]['metropole'] = metropole_
######################
#load insee / postal code
######################
#insee_cp_commune = pd.read_csv(dir_in+'laposte_hexasmal.csv')
#insee_cp_commune = insee_cp_commune.rename(columns={'Code_postal':'CI'})
######################
#load prefectures
######################
prefectures = pd.read_csv(dir_in+'hotels-de-prefectures-fr.csv')
prefectures = gpd.GeoDataFrame( prefectures.loc[:,(prefectures.columns!='LonDD')&(prefectures.columns!='LatDD')] ,
geometry= gpd.points_from_xy(x=prefectures['LonDD'],y=prefectures['LatDD']), crs={'init': 'epsg:4326'})
prefectures = prefectures.to_crs(epsg=3395)
prefectures['add_to_name_position'] = shapely.geometry.Point(0,0)
######################
# Merge geo commune info and appellation
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"map_df_communes_appellation_bassin.shp")))):
#load appelation par communes
allAppellation_per_communes = pd.read_csv(dir_in+'2020-02-26-comagri-communes-aires-ao_ronan.csv')
allAppellation_per_communes.rename(columns=lambda x: x.replace('Aire geographique','Appellation'), inplace=True)
allAppellation_per_communes.Appellation = allAppellation_per_communes.Appellation.str.lower().str.replace(' ','-')
allAppellation_per_communes = allAppellation_per_communes.rename(columns={'CI':'insee'})
print('merge commune and appellations ...')
#join df
appellation_bassin_per_communes = allAppellation_per_communes.set_index('Appellation').join(listAppellations.set_index('Appellation'))
#deal with Alsace
appellation_bassin_per_communes.loc['alsace':'alsacf','Bassin'] = 'Alsace'
#fiefs vendees
appellation_bassin_per_communes.loc[['fiefs-vend\xc3\xa9ens' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Vall\xc3\xa9e de la Loire'
#gaillac
appellation_bassin_per_communes.loc[['gaillac' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Sud-Ouest'
#saumur
appellation_bassin_per_communes.loc[['saumur' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Vall\xc3\xa9e de la Loire'
#vosne-romanee
appellation_bassin_per_communes.loc[['vosne-roman\xc3\xa9e' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Bourgogne'
#vougeot
appellation_bassin_per_communes.loc[['vougeot' in s for s in appellation_bassin_per_communes.index],'Bassin'] = 'Bourgogne'
appellation_bassin_per_communes = appellation_bassin_per_communes.loc[appellation_bassin_per_communes['Bassin'].notna()]
appellation_bassin_per_communes = appellation_bassin_per_communes.reset_index()
#convert CI to insee format
idx_notCorsica = appellation_bassin_per_communes.loc[pd.to_numeric(appellation_bassin_per_communes['insee'], errors='coerce').notna()].index
tmp_ = pd.to_numeric(appellation_bassin_per_communes.loc[idx_notCorsica,'insee'], errors='coerce').map('{:05g}'.format)
appellation_bassin_per_communes.loc[idx_notCorsica,'insee'] = tmp_
map_df = map_dfCommune.merge(appellation_bassin_per_communes, on='insee')
map_df.Bassin = [ str(xx) for xx in map_df.Bassin]
map_df.to_file(wkdir+"map_df_communes_appellation_bassin.shp")
else:
map_df = gpd.read_file(wkdir+"map_df_communes_appellation_bassin.shp")
######################
# bassins shapefile
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"bassins.shp")))):
print('bassins ...')
bassins = map_df[['Bassin','geometry']].dissolve(by='Bassin').reset_index()
bassins = bassins.merge(listBassinColor,on='Bassin')
#sort by area to help plotting
bassins['area']=bassins.geometry.area
bassins = bassins.sort_values(by='area', ascending=False)
bassins = bassins.to_crs(epsg=3395)
bassins = bassins.rename(columns={'Bassin':'nom'})
bassins.to_file(wkdir+"bassins.shp")
else:
bassins = gpd.read_file(wkdir+"bassins.shp")
bassins = bassins.sort_values(by='area', ascending=False)
country_pakage['France']['bassins'] = bassins
######################
# appellations shapefile
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations.shp")))):
print('appellations ...')
appellations = map_df[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
appellations = appellations.rename(columns={'Bassin':'bassin'})
#sort by area to help plotting
appellations['area']=appellations.geometry.area
appellations = appellations.sort_values(by='area', ascending=False)
appellations = appellations.rename(columns={'Appellation':'nom'})
appellations = appellations.to_crs(epsg=3395)
appellations.to_file(wkdir+"appellations.shp")
else:
appellations = gpd.read_file(wkdir+"appellations.shp")
appellations = appellations.sort_values(by='area', ascending=False)
########################################
#Add IGP stored locally in appellations:
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_igp.shp")))):
dir_igp = dir_in+'IGP/'
appellations_igp = []
for csvFile in glob.glob(dir_igp+'*.csv'):
igp_ = pd.read_csv(csvFile)
igp_.rename(columns=lambda x: x.replace('Aire geographique','Appellation'), inplace=True)
igp_.Appellation = igp_.Appellation.str.lower().str.replace(' ','-')
igp_ = igp_.rename(columns={'CI':'insee'})
igp_.insee = [str(xx) for xx in igp_.insee]
igp_.Bassin = [ str(xx) for xx in igp_.Bassin]
map_df_ = map_dfCommune.merge(igp_, on='insee')
map_df_.Bassin = [ str(xx) for xx in map_df_.Bassin]
igp_gpd = map_df_[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
igp_gpd['area']=igp_gpd.geometry.area
igp_gpd = igp_gpd.to_crs(epsg=3395)
igp_gpd = igp_gpd.rename(columns={'Appellation':'nom'})
igp_gpd = igp_gpd.rename(columns={'Bassin':'bassin'})
appellations_igp.append(igp_gpd)
appellations_igp = pd.concat(appellations_igp, ignore_index=True)
appellations_igp.to_file(wkdir+"appellations_igp.shp")
else:
appellations_igp = gpd.read_file(wkdir+"appellations_igp.shp")
appellations_igp = appellations_igp.sort_values(by='area', ascending=False)
########################################
#replace geographic zone for alsace grand cru to production zone stored locally in CarteVin/GrandCruAslace:
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_AlsaceGrandCru.shp")))):
dir_agc = dir_in+'GrandCruAlsace/'
appellations_agc = []
for csvFile in glob.glob(dir_agc+'*.csv'):
try:
agc_ = pd.read_csv(csvFile, sep=';', encoding = "ISO-8859-1")
except:
pdb.set_trace()
agc_.rename(columns=lambda x: x.replace('Aire g\xe9ographique','Appellation'), inplace=True)
agc_.Appellation = agc_.Appellation.str.lower().str.replace(' ','-')
agc_ = agc_.rename(columns={'CI':'insee'})
agc_.insee = [str(xx) for xx in agc_.insee]
agc_['Bassin'] = ['Alsace']*len(agc_)
agc_ = agc_.loc[agc_['Zone'] == 'Zone de production des raisins']
map_df_ = map_dfCommune.merge(agc_, on='insee')
map_df_.Bassin = [ str(xx) for xx in map_df_.Bassin]
agc_gpd = map_df_[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
agc_gpd['area']=agc_gpd.geometry.area
agc_gpd = agc_gpd.to_crs(epsg=3395)
agc_gpd = agc_gpd.rename(columns={'Appellation':'nom'})
agc_gpd = agc_gpd.rename(columns={'Bassin':'bassin'})
appellations_agc.append(agc_gpd)
appellations_agc = pd.concat(appellations_agc, ignore_index=True)
appellations_agc.to_file(wkdir+"appellations_AlsaceGrandCru.shp")
else:
appellations_agc = gpd.read_file(wkdir+"appellations_AlsaceGrandCru.shp")
appellations_agc = appellations_agc.sort_values(by='area', ascending=False)
########################################
#Add particular zone stored locally in appellations: (ex: cidre fouesnantais)
########################################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_other.shp")))):
dir_other = dir_in+'AutreAppellation/'
appellations_other = []
for csvFile in glob.glob(dir_other+'*.csv'):
other_ = pd.read_csv(csvFile)
other_.rename(columns=lambda x: x.replace('Aire geographique','Appellation'), inplace=True)
other_.Appellation = other_.Appellation.str.lower().str.replace(' ','-')
other_ = other_.rename(columns={'CI':'insee'})
other_.insee = [str(xx) for xx in other_.insee]
other_.Bassin = [ str(xx) for xx in other_.Bassin]
map_df_ = map_dfCommune.merge(other_, on='insee')
map_df_.Bassin = [ str(xx) for xx in map_df_.Bassin]
other_gpd = map_df_[['Appellation','geometry','Bassin']].dissolve(by='Appellation').reset_index()
other_gpd['area']=other_gpd.geometry.area
other_gpd = other_gpd.to_crs(epsg=3395)
other_gpd = other_gpd.rename(columns={'Appellation':'nom'})
other_gpd = other_gpd.rename(columns={'Bassin':'bassin'})
appellations_other.append(other_gpd)
appellations_other = pd.concat(appellations_other, ignore_index=True)
appellations_other.to_file(wkdir+"appellations_other.shp")
else:
appellations_other = gpd.read_file(wkdir+"appellations_other.shp")
appellations_other = appellations_other.sort_values(by='area', ascending=False)
######################
# international appellations shapefile
######################
if ((not(flag_restart)) or (not(os.path.isfile(wkdir+"appellations_international.shp")))):
print('international appellations ...')
shpFiles = glob.glob(dir_in + 'AppellationInternational/wineRegion*.shp')
appellations_international = []
bassins_international = {}
for shpFile in shpFiles:
pays_ = shpFile.split('wineRegion')[1].split('.')[0]
wineRegion = gpd.read_file(shpFile)
if pays_ == 'Hongrie':
wineRegion['Name'] = [xx.encode('latin-1').decode('utf-8') for xx in wineRegion['Name'] ]
wineRegion = wineRegion.rename(columns={'Name':'nom'})
wineRegion['nom'] = wineRegion['nom'].str.replace('wine region','').str.strip()
wineRegion['bassin'] = wineRegion['nom']
#sort by area to help plotting
wineRegion['area']=wineRegion.geometry.area
wineRegion = wineRegion.sort_values(by='area', ascending=False)
wineRegion = wineRegion.to_crs(epsg=3395)
wineRegion = wineRegion[['nom','bassin','geometry','area']]
bassins_international[pays_] = wineRegion.copy()
wineRegion['nom'] = wineRegion['nom'].str.lower().str.replace(' ','-')
wineRegion['bassin'] = wineRegion['bassin'].str.lower().str.replace(' ','-')
appellations_international.append(wineRegion)
appellations_international = pd.concat(appellations_international, ignore_index=True)
appellations_international.to_file(wkdir+"appellations_international.shp")
pickle.dump(bassins_international,open(wkdir+'bassins_international.pickle','wb'))
else:
appellations_international = gpd.read_file(wkdir+"appellations_international.shp")
appellations_international = appellations_international.sort_values(by='area', ascending=False)
bassins_international = pickle.load(open(wkdir+'bassins_international.pickle', 'rb'))
#collect international bassins from appellation
for key in list(country_pakage.keys()):
if key == 'France':
continue
elif key in [u'Nouvelle Z\xe9lande ', u'Italie']:
empty_ = gpd.GeoDataFrame(columns = [u'nom', u'geometry', u'color', u'area', u'add_to_name_position'], crs="EPSG:4326")
country_pakage[key]['bassins'] = empty_
else:
bassins_ = bassins_international[key].copy()
listBassinColor_ = pd.read_csv(dir_in+'AppellationInternational/bassins-colors-{:s}.csv'.format(key))
listBassinColor_['bassin'] = [str(xx) for xx in listBassinColor_['bassin']]
country_pakage[key]['bassins'] = bassins_.merge(listBassinColor_,on='bassin')
dir_maps = dir_out + 'VinMaps/'
ensure_dir(dir_maps)
######################
# Large Plot
######################
bassins['add_to_name_position'] = shapely.geometry.Point(0,0)
bassins.at[bassins[bassins['nom']=='Savoie-Bugey'].index[0],'add_to_name_position'] = shapely.geometry.Point(40.e3,-40.e3) #Savoie
bassins.at[bassins[bassins['nom']=='Languedoc-Roussillon'].index[0],'add_to_name_position'] = shapely.geometry.Point(-90.e3,-30.e3) #Lamguedoc
bassins.at[bassins[bassins['nom']==u'Vall\xe9e du Rh\xf4ne'].index[0],'add_to_name_position'] = shapely.geometry.Point(0.,30.e3) #Vallee Rhone
bassins.at[bassins[bassins['nom']=='Jura'].index[0],'add_to_name_position'] = shapely.geometry.Point(40.e3,0.) #Jura
bassins.at[bassins[bassins['nom']=='Lyonnais'].index[0],'add_to_name_position'] = shapely.geometry.Point(-60.e3,-20.e3) #Lyonnais
bassins.at[bassins[bassins['nom']=='Alsace'].index[0],'add_to_name_position'] = shapely.geometry.Point(-50.e3,0.) #Alsace
bassins.at[bassins[bassins['nom']=='<NAME>'].index[0],'add_to_name_position'] = shapely.geometry.Point(-20.e3,-5.e3)
bassins.at[bassins[bassins['nom']=='<NAME>'].index[0],'nom'] = '<NAME>'
bassins.at[bassins[bassins['nom']=='<NAME>'].index[0],'add_to_name_position'] = shapely.geometry.Point(100.e3,-100.e3)
bassins.at[bassins[bassins['nom']=='<NAME>'].index[0],'nom'] = '<NAME>'
if ((not(flag_restart)) or (not(os.path.isfile(dir_maps+"bassinViticoleFrance.png")))):
print('plot large map')
xmin, ymin, xmax, ymax = metropole.geometry.total_bounds
xx = xmax-xmin; yy = ymax-ymin
xc = xmin+.5*xx; yc = ymin+.5*yy
dd = max([xx,yy]) + 50.e3
xmin = xc-.5*dd; xmax = xc+.5*dd
ymin = yc-.5*dd; ymax = yc+.5*dd
ratio= old_div((ymax-ymin),(xmax-xmin))
x_image = 8
mpl.rcdefaults()
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
mpl.rcParams['font.size'] = 13
mpl.rcParams['legend.fontsize'] = 9
mpl.rcParams['figure.subplot.hspace'] = 0.0
mpl.rcParams['figure.subplot.wspace'] = 0.0
fig = plt.figure(figsize=(x_image,x_image*ratio),facecolor='.8')
ax = fig.add_axes([0.002,0.002, 0.996,.996])
#ax.set_axis_off()
ax.set_xticks([])
ax.set_yticks([])
seaWater.plot(ax=ax, antialiased=True,zorder=0,facecolor='.8')
ax.patch.set_facecolor('.8')
metropole.plot(ax=ax, facecolor='white', edgecolor='none',linewidth=.0,zorder=0)
bassins.plot( ax=ax, cmap=plt.cm.colors.ListedColormap(bassins['color']),zorder=1,alpha=.5)
bassins.apply(lambda x: ax.annotate(text=str(x.nom),\
xy=[x.geometry.centroid.x + x.add_to_name_position.coords.xy[0][0],\
x.geometry.centroid.y + x.add_to_name_position.coords.xy[1][0] ], ha='center',zorder=5),axis=1);
inlandWater.plot(ax=ax, antialiased=True,zorder=3,facecolor='.8',edgecolor='.8',linewidth=.15)
inlandWater_river.plot(ax=ax, antialiased=True,zorder=3,facecolor='none',edgecolor='.45',linewidth=.15)
#inlandWater_lake.plot(ax=ax, antialiased=True,zorder=2,facecolor='.8',edgecolor='.8',linewidth=.15)
metropole.plot(ax=ax, facecolor='none', edgecolor='k',linewidth=.4,zorder=4);
coasts_borders.plot(ax=ax, facecolor='white', edgecolor='k', antialiased=True,linewidth=.4,zorder=2)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
#plt.show()
fig.savefig(dir_maps+'bassinViticoleFrance.png', dpi=dpi_largePlot,)# acecolor=fig.get_facecolor())
plt.close(fig)
bassins.at[bassins[bassins['nom']=='<NAME>\nBretagne'].index[0],'nom'] = '<NAME>'
bassins.at[bassins[bassins['nom']=='<NAME>'].index[0],'nom'] = '<NAME>'
######################
# single Plot
######################
#load template
f = open(dir_out+'InputTex/vin_template.tex','r')
lines_ori = f.readlines()
f.close()
lineXX = np.where(np.array(lines_ori)=='XX\n')[0][0]
final1_lines = lines_ori[:lineXX]
final2_lines = []
final3_lines = lines_ori[lineXX+1:]
listVins = listVins.sort_values(by=['Pays_order', 'Bassin','DomaineChateau','Couleur','Appelation','Cuvee']).reset_index()
section_bassin = 'mm'
section_domain = 'mm'
section_couleur = 'mm'
vinDictionary = {}
try:
vinDictionary2 = pickle.load(open(dir_out+'vinDictionary_fromWebSiteParsing.pickle', 'rb'))
except:
vinDictionary2 = {}
list_vin_noRecipies = []
for index, vin in listVins.iterrows():
flag_igp = 0
flag_other = 0
flag_international = 0
#select appellation from vin
appellations_containing_vin = gpd.tools.sjoin(appellations,listVins.loc[[index]],how='inner')
#select appellations that contains this appellation
#select appellations at the same level
#select river from ROUTE120_1-1_SHP_LAMB93_000_2012-11-26/
#https://stackoverflow.com/questions/16992038/inline-labels-in-matplotlib
#select topo?
#plot
#add in vin.tex
#--------------
newBassin = 0
if (section_bassin != vin.Bassin) & (section_bassin!='International'):
#add subsection
final2_lines.append('\n')
final2_lines.append('\\newpage')
newBassin = 1
if vin.Pays == 'France':
section_bassin = vin.Bassin
else:
section_bassin = 'International'
print(section_bassin)
final2_lines.append( u'\\fakesubsection{{{:s}}}\n'.format(section_bassin))
newDomain = 0
if section_domain != vin.DomaineChateau:
print(' ', vin.DomaineChateau)
tmp_ = '\\newpage' if (newBassin == 0) else ''
final2_lines.append('\n')
final2_lines.append('%##########\n')
if vin.Pays == 'France':
final2_lines.append(u'\\vinSection[{:s}]{{{:s}}}\n'.format(tmp_,vin.DomaineChateau.replace('&','\&')))
else:
final2_lines.append(u'\\vinSection[{:s}]{{{:s} - {:s}}}\n'.format(tmp_,vin.Pays,vin.DomaineChateau.replace('&','\&')))
final2_lines.append('\\label{{sec:{:s}}}\n'.format(\
vin.DomaineChateau.replace('&','').replace(' ','').lower() ))
final2_lines.append('%##########\n')
address1 = vin['Adresse'].split(' ')
address2 = []
for address_ in address1:
tmp_ = address_.rstrip(',').rstrip(' ')
if tmp_ != '':
address2.append(tmp_)
if ''.join(address2[:-1]) == '':
address3 = '{:s} {:05.0f}'.format( address2[-1], vin['Code postal'] )
else:
address3 = '{:s} \\newline {:s} {:05.0f}'.format( ' '.join(address2[:-1]), address2[-1], vin['Code postal'])
if section_bassin == 'International':
address3 += '\\newline {:s}'.format(vin.Pays)
#plot
if (section_domain != 'mm'):
if ((not(flag_restart)) or (not(os.path.isfile(map_domain)))):
saveplot(appellations_domain, vin_prev, metropole_prev, bassins_prev)
section_domain = vin.DomaineChateau
map_domain = dir_maps+'{:s}.png'.format(''.join(section_domain.split(' '))).replace("'",'')
map_domain = unicodedata.normalize('NFD', str(map_domain)).encode('ascii', 'ignore')
#print map_domain
final2_lines.append(u'\\vinShowInfoDomain{{{:s}}}{{{:s}}}{{{:s}}}\n'.format(vin['Nom Producteur'].replace('&','\&'), address3, map_domain.decode("utf-8")))
newDomain = 1
section_couleur = 'mm'
if (((not(flag_restart)) or (not(os.path.isfile(map_domain))))):
ratio= 1.
x_image = 8
mpl.rcdefaults()
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
mpl.rcParams['font.size'] = 20
mpl.rcParams['legend.fontsize'] = 18
mpl.rcParams['figure.subplot.left'] = .0
mpl.rcParams['figure.subplot.right'] = 1.
mpl.rcParams['figure.subplot.top'] = 1.
mpl.rcParams['figure.subplot.bottom'] = .0
mpl.rcParams['figure.subplot.hspace'] = 0.02
mpl.rcParams['figure.subplot.wspace'] = 0.02
fig = plt.figure(figsize=(x_image,x_image*ratio))#,facecolor='.8')
#plt.subplots_adjust(left=0.0,right=.66,top=1,bottom=0.0,wspace=0.15,hspace=0.05)
ax = fig.add_axes([0.01,0.01, 0.98,.98])
ax.set_xticks([])
ax.set_yticks([])
#--
if vin.DomaineChateau == u'Ch\xe2teau La Borie':
bx = fig.add_axes([0.02,0.02, 0.26,0.26])
elif vin.DomaineChateau == u'Ch\xe2teau Haut-Marbuzet':
bx = fig.add_axes([0.02,0.02, 0.26,0.26])
elif vin.DomaineChateau == u'Cidre S\xe9h\xe9dic':
bx = fig.add_axes([0.02,0.02, 0.26,0.26])
elif vin.DomaineChateau == u'<NAME>':
bx = fig.add_axes([0.02,0.72, 0.26,0.26])
else:
bx = fig.add_axes([0.72,0.02, 0.26,0.26])
bx.set_xticks([])
bx.set_yticks([])
try:
country_pakage[vin['Pays']]['seaWater'].plot(ax=ax, antialiased=True,zorder=0,facecolor='.8')
ax.patch.set_facecolor('.8')
country_pakage[vin['Pays']]['metropole'].plot(ax=ax, facecolor='white', edgecolor='None',linewidth=.1,zorder=0)
#--
country_pakage[vin['Pays']]['seaWater'].plot(ax=bx, antialiased=True,zorder=0,facecolor='.8')
bx.patch.set_facecolor('.8')
country_pakage[vin['Pays']]['metropole'].plot(ax=bx, facecolor='white', edgecolor='None',linewidth=.1,zorder=0)
bassins_ = country_pakage[vin['Pays']]['bassins']
bassins_.loc[bassins_.nom == vin.Bassin].plot( ax=ax, color= bassins_.loc[bassins_.nom==vin.Bassin,'color'],zorder=1, alpha=.5)
bassins_.loc[bassins_.nom != vin.Bassin].plot( ax=ax, color= bassins_.loc[bassins_.nom!=vin.Bassin,'color'],zorder=1, alpha=.5)
#--
bassins_.plot( ax=bx, color= bassins_.color, zorder=1, alpha=.5)
country_pakage[vin['Pays']]['inlandWater'].plot(ax=ax, antialiased=True,zorder=3,facecolor='.8',edgecolor='.8',linewidth=.15)
country_pakage[vin['Pays']]['inlandWater_river'].plot(ax=ax, antialiased=True,zorder=2,facecolor='none',edgecolor='.1',linewidth=.15)
country_pakage[vin['Pays']]['inlandWater_lake'].plot(ax=ax, antialiased=True,zorder=3,facecolor='.85',edgecolor='.1',linewidth=.15)
#--
#country_pakage[vin['Pays']]['inlandWater'].plot(ax=bx, antialiased=True,zorder=3,facecolor='.8',edgecolor='.8',linewidth=.15)
#country_pakage[vin['Pays']]['inlandWater_river'].plot(ax=bx, antialiased=True,zorder=3,facecolor='none',edgecolor='.1',linewidth=.15)
country_pakage[vin['Pays']]['metropole'].plot(ax=ax, facecolor='none', edgecolor='k',linewidth=.8,zorder=4);
#--
#metropole.plot(ax=bx, facecolor='none', edgecolor='k',linewidth=.1,zorder=4);
#coasts_borders.plot(ax=ax, facecolor='white', edgecolor='k', antialiased=True,linewidth=.8,zorder=2)
listVins.loc[[index],'geometry'].plot(ax=ax, zorder=6, color='k', markersize=30, marker='s')
country_pakage[vin['Pays']]['coasts_borders'].plot(ax=ax,facecolor='.9', edgecolor='.4', linewidth= 1, zorder=1)
#--
country_pakage[vin['Pays']]['coasts_borders'].plot(ax=bx,facecolor='.9', edgecolor='.4', linewidth=.5, zorder=1)
except Exception:
pass  # some map layers may be missing for this country; skip the map background in that case
appellations_domain = []
appellations_domainName = []
LegendElement_domain = [Line2D([0], [0], marker='s', color='None', label=section_domain, markerfacecolor='k', markersize=10)]
#ax.set_xlim(vin.geometry.coords.xy[0][0]-buffer_lim,vin.geometry.coords.xy[0][0]+buffer_lim)
#ax.set_ylim(vin.geometry.coords.xy[1][0]-buffer_lim,vin.geometry.coords.xy[1][0]+buffer_lim)
newCouleur = 0
if section_couleur != vin.Couleur:
print(' ', vin.Couleur)
final2_lines.append('\n')
final2_lines.append(u'\n\\vinShowCouleur{{{:s}}}\n'.format(vin.Couleur))
final2_lines.append(u'%--------------\n')
section_couleur = vin.Couleur
newCouleur = 1
#add appellation to plot
print(' ', vin.Appelation)
appellation_ = appellations.loc[appellations.nom == '-'.join(vin.Appelation.lower().split(' ')) ]
vin_prev = vin # to get right name in the saveplot fct
metropole_prev = country_pakage[vin['Pays']]['metropole']
bassins_prev = country_pakage[vin['Pays']]['bassins']
flag_need_more = False
if (len(appellation_) == 0):
flag_need_more = True
else:
if ('alsace-grand-cru' in appellation_.nom.item()):
flag_need_more = True
if flag_need_more:
tmp_= '-'.join(vin.Appelation.lower().split(' '))
if 'maury' in tmp_ : appellation_ = appellations.loc[appellations.nom == 'maury'] # all Maury wines share the same zone
if 'gaillac' in tmp_: appellation_ = appellations.loc[appellations.nom == u'gaillac-rouge-et-ros\xe9'] # all Gaillac wines share the same zone
if 'cotentin' in tmp_: appellation_ = appellations.loc[appellations.nom == u'cidre-cotentin-ou-cotentin'] # all Cotentin ciders share the same zone
#alsace
appellation_agc_ = appellations_agc.loc[appellations_agc.nom == '-'.join(vin.Appelation.lower().split(' ')) ]
if len(appellation_agc_) != 0:
appellation_ = appellation_agc_
else:
if tmp_ == 'alsace' : appellation_ = appellations.loc[appellations.nom.str.contains('alsace-suivi-ou-non')]
if tmp_ == 'alsace-pinot-noir': appellation_ = appellations.loc[appellations.nom.str.contains('alsace-suivi-ou-non')]
if tmp_ == 'alsace-gewurztraminer' : appellation_ = appellations.loc[appellations.nom.str.contains('alsace-suivi-ou-non')]
if tmp_ == 'alsace-riesling' : appellation_ = appellations.loc[appellations.nom.str.contains('alsace-suivi-ou-non')]
#igp
appellation_igp_ = appellations_igp.loc[appellations_igp.nom == '-'.join(vin.Appelation.lower().split(' ')) ]
if len(appellation_igp_) != 0:
appellation_ = appellation_igp_
flag_igp = 1
#other
appellation_other_ = appellations_other.loc[appellations_other.nom == '-'.join(vin.Appelation.lower().split(' ')) ]
if len(appellation_other_) != 0:
appellation_ = appellation_other_
flag_other = 1
# international
appellation_international_ = appellations_international.loc[appellations_international.nom == '-'.join(vin.Bassin.lower().split(' ')) ]
if len(appellation_international_) != 0:
appellation_ = appellation_international_
flag_international = 1
if len(appellation_) == 0:
print(' **** missing appellation:', '-'.join(vin.Appelation.lower().split(' ')))
#if (vin.Bassin == 'Alsace') : pdb.set_trace()
#if (u'H\xe9rault' in vin.Appelation): continue
#if (u'Caume' in vin.Appelation): continue
#if (u'Vin de France' in vin.Appelation): continue
#if (u"Pineau d'Aunis" in vin.Appelation): continue
key = vin.Couleur.replace(' ', '').lower()+vin.Appelation.replace(' ', '').lower()+\
vin.DomaineChateau.replace(' ', '').lower()+vin.Cuvee.replace(' ', '').lower()
listRecipies_here = ''
if key in list(vinDictionary2.keys()):
for recipe in vinDictionary2[key]:
listRecipies_here += '{:s}, '.format(recipe)
listRecipies_here = listRecipies_here[:-2]+'.'
if listRecipies_here == '.':
list_vin_noRecipies.append([vin.DomaineChateau, vin.Couleur, vin.Appelation, vin.Cuvee])
if (flag_igp == 1):
vin_Appelation = vin.Appelation + ' (IGP)'
elif (flag_other == 1):
vin_Appelation = vin.Appelation + u' (Appellation Locale)'
else:
vin_Appelation = vin.Appelation
if False: #newCouleur == 1:
final2_lines.append(u'\\vinShowInfoAppellation{{{:s}}}{{{:s}}}{{{:s}}}{{{:s}}}{{{:s}}} \n'.format(vin_Appelation, vin.Cuvee, vin.Cepages.replace('%', '\\%'), listRecipies_here, vin.Couleur))
else:
final2_lines.append(u'\\vinShowInfoAppellation{{{:s}}}{{{:s}}}{{{:s}}}{{{:s}}} \n'.format(vin_Appelation, vin.Cuvee, vin.Cepages.replace('%', '\\%'), listRecipies_here))
#final2_lines.append(u'\n \\vspace{.05cm} \n')
#create_dictionary
vinDictionary[key] = []
#pdb.set_trace()
if (((not(flag_restart)) or (not(os.path.isfile(map_domain))))):
if len(appellation_) != 0:
hash_patterns = ('..', '//', 'o', '\\\\', 'O', '*', '-')
hash_colors = ('.5', '.5', '.5', '.5', '.5', '.5', '.5')
facecolor = 'none'
if u'domainedelamordor' in vin.DomaineChateau.replace('&','').replace(' ','').lower() :
hash_patterns = ('oo', '....', 'O', '//', '-')
hash_colors = ('.5', '0.1', '0.5', '.5', '.5', '.5', '.5')
facecolor = ''
if simple_appelation(appellation_.reset_index().nom[0]) not in appellations_domainName:
appellation_.plot(ax=ax, zorder=5, facecolor='none', edgecolor=hash_colors[len(appellations_domain)], hatch=hash_patterns[len(appellations_domain)])
appellations_domainName.append(simple_appelation(appellation_.reset_index().nom[0]))
if flag_international == 0:
LegendElement_domain.append( mpatches.Patch(facecolor='w', hatch=hash_patterns[len(appellations_domain)], \
edgecolor=hash_colors[len(appellations_domain)], label=simple_appelation(vin.Appelation) ) )
else:
LegendElement_domain.append( mpatches.Patch(facecolor='w', hatch=hash_patterns[len(appellations_domain)], \
edgecolor=hash_colors[len(appellations_domain)], label=simple_appelation(vin.Bassin) ) )
appellations_domain = appellation_ if len(appellations_domain)==0 else appellations_domain.append(appellation_)
flag_checkVinIng = False
if index == len(listVins)-1 :
flag_checkVinIng = True
else:
if section_domain != listVins.DomaineChateau[index+1]:
flag_checkVinIng = True
if flag_checkVinIng:
domainChateau_ = unidecode.unidecode(vin.DomaineChateau.replace('&','').replace(' ','').lower())
imgVinFiles = glob.glob( dir_out + 'VinImg/' + domainChateau_ + '.png')
imgVinFiles.extend(glob.glob( dir_out + 'VinImg/' + domainChateau_ + '.jpg'))
imgVinFiles.extend(glob.glob( dir_out + 'VinImg/' + domainChateau_ + '.jpeg'))
imgVinFiles.extend(glob.glob( dir_out + 'VinImg/' + domainChateau_ + '-extra*.png'))
imgVinFiles.extend(glob.glob( dir_out + 'VinImg/' + domainChateau_ + '-extra*.jpg'))
imgVinFiles.extend(glob.glob( dir_out + 'VinImg/' + domainChateau_ + '-extra*.jpeg'))
for imgVinFile in imgVinFiles:
configImgFile = os.path.dirname(imgVinFile)+'/'+os.path.basename(imgVinFile).split('.')[0]+'.txt'
if os.path.isfile(configImgFile):
with open(configImgFile,'r') as f:
lines_confImg = f.readlines()
widthImg = float(lines_confImg[0].split(':')[1])
vertivalSpaveImg = float(lines_confImg[1].split(':')[1])
else:
widthImg = 0.6
vertivalSpaveImg = 2
final2_lines.append(u'\\showExtraVinImg{{{:s}}}{{{:3.1f}}}{{{:3.1f}mm}}\n'.format(imgVinFile,widthImg,vertivalSpaveImg))
#for the last plot
if (((not(flag_restart)) or (not(os.path.isfile(map_domain)))) & (section_domain != 'mm')):
saveplot(appellations_domain, vin_prev, metropole_prev, bassins_prev )
#scan wines and remove those with no recipes in the reference
#remove entries with no recipes
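# note: wines with an empty recipe list carry the literal sentinel '{.}' in their
# \vinShowInfoAppellation line, so any LaTeX line containing '{.}' is dropped here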
final22_lines = []
for line in final2_lines:
if '{.}' not in line: final22_lines.append(line)
if True:
#remove empty colour
final222_lines = []
for iline, line in enumerate(final22_lines):
if 'vinShowCouleur' in line:
flag_ok = False
ii = iline+1
while ii < len(final22_lines):
if 'vinShowInfoAppellation' in final22_lines[ii]:
flag_ok = True
final222_lines.append(line)
break
if 'vinShowCouleur' in final22_lines[ii]:
break
ii += 1
continue
final222_lines.append(line)
else:
final222_lines = final22_lines
if True:
#remove empty wine
final2222_lines = []
ii_no = -999
for iline, line in enumerate(final222_lines):
if 'vinSection' in line:
ii = iline+1
while ii < len(final222_lines):
if 'vinShowCouleur' in final222_lines[ii]:
final2222_lines.append(line)
ii_no = -999
break
if ('vinSection' in final222_lines[ii]) | ('newpage' in final222_lines[ii]):
ii_no = ii -1
break
if ii == len(final222_lines)-1:
ii_no = ii -1
break
ii += 1
continue
#print(ii_no)
#if 'Cabelier' in line: pdb.set_trace()
if iline <= ii_no: continue
final2222_lines.append(line)
else:
final2222_lines = final222_lines
#save file
f= io.open(dir_out+"vin.tex","w")
for line in final1_lines+final2222_lines+final3_lines:
f.write( line )
f.close()
#save file listing wines that did not get recipes
f= io.open(dir_out+"LogError/listVin_noRecipies.csv","w",)
f.write( '{:s}, {:s}, {:s}, {:s}, \n'.format('domaine', 'couleur', 'appellation', 'cuvee') )
for [domain, couleur, app, cuvee] in list_vin_noRecipies :
line = '{:s}, {:s}, {:s}, {:s}, \n'.format(domain, couleur, app, cuvee)
f.write( line )
f.close()
pickle.dump(vinDictionary,open(dir_out+'vinDictionary_fromExcelFile.pickle','wb'))
sys.exit()
#listVins.plot(ax=ax,facecolor='.1',zorder=4, markersize=5)
# unreachable scratch (after sys.exit above): get a location from an address
mm = geopy.geocoders.BANFrance()
mm.geocode('34 Route de Rosenwiller, 67560 Rosheim')
map_df[map_df['insee']<97000 | map_df['insee']]
pd.to_numeric(map_df['insee'], errors='coerce')
rdf = gpd.GeoDataFrame( pd.concat(dataframesListmap_df, ignore_index=True) )
import os
import sys
import numpy as np
import pandas as pd
import xarray as xr
# import analysis_tools.naming_conventions.var_info
from oas_dev.util.filenames import get_filename_pressure_coordinate_field
from oas_dev.util.naming_conventions import var_info
def make_folders(path):
"""
Takes path and creates to folders
:param path: Path you want to create (if not already existant)
:return: nothing
"""
path = extract_path_from_filepath(path)
split_path = path.split('/')
if (path[0] == '/'):
path_inc = '/'
else:
path_inc = ''
for ii in np.arange(len(split_path)):
# if ii==0: path_inc=path_inc+split_path[ii]
path_inc = path_inc + split_path[ii]
if not os.path.exists(path_inc):
os.makedirs(path_inc)
path_inc = path_inc + '/'
return
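# Example (illustrative): make_folders('output/plots/fig.png') creates
# 'output/' and 'output/plots/' if they do not already exist.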
def append2dic(self, ds_append, ds_add):
for key in ds_add.attrs.keys():
if key not in ds_append.attrs:
ds_append.attrs[key] = ds_add.attrs[key]
return ds_append
def extract_path_from_filepath(file_path):
"""
ex: 'folder/to/file.txt' returns 'folder/to/'
:param file_path:
:return:
"""
st_ind=file_path.rfind('/')
foldern = file_path[0:st_ind]+'/'
return foldern
def save_dataset_to_netcdf(dtset, filepath):
"""
:param dtset:
:param filepath:
:return:
"""
dummy = dtset.copy()
# dummy.time.encoding['calendar']='standard'
go_through_list= list(dummy.coords)
if isinstance(dummy, xr.Dataset):
go_through_list = go_through_list + list(dummy.data_vars)
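# netCDF attributes cannot hold Python booleans, so the 'Pres_addj'
# flags are converted to the strings 'True'/'False' before saving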
for key in go_through_list:
if 'Pres_addj' in dummy[key].attrs:
if dummy[key].attrs['Pres_addj']:
dummy[key].attrs['Pres_addj'] = 'True'
else:
dummy[key].attrs['Pres_addj'] = 'False'
if ('Pres_addj' in dummy.attrs):
if dummy.attrs['Pres_addj']:
dummy.attrs['Pres_addj'] = 'True'
else:
dummy.attrs['Pres_addj'] = 'False'
if 'time' in dummy.coords:
if 'units' in dummy['time'].attrs:
del dummy['time'].attrs['units']
if 'calendar' in dummy['time'].attrs:
del dummy['time'].attrs['calendar']
print('Saving dataset to: '+ filepath)
make_folders(filepath)
dummy.load()
dummy.to_netcdf(filepath, mode='w') # ,encoding={'time':{'units':'days since 2000-01-01 00:00:00'}})
del dummy
return
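# Example (illustrative): save_dataset_to_netcdf(ds, '/some/dir/out.nc')
# creates any missing folders in '/some/dir/' and writes ds to out.nc.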
#def get_varn_eusaar_comp(varn):
def save_pressure_coordinate_field(dtset, var, model, path_savePressCoord):
if (not dtset[var].attrs['Pres_addj']):
print('Not pressure adjusted! Will not save')
else:
argmax=dtset['time'].argmax().values
argmin=dtset['time'].argmin().values
if 'startyear' in dtset.attrs: startyear = dtset.attrs['startyear']
else: startyear = dtset['time.year'].min().values
if 'endyear' in dtset.attrs: endyear = dtset.attrs['endyear']
else: endyear = dtset['time.year'].max().values
startmonth = dtset['time.month'][argmin].values
endmonth = dtset['time.month'][argmax].values
case = dtset.attrs['case_name']
filename, filename_m = get_filename_pressure_coordinate_field(case, dtset, endmonth, endyear, model, path_savePressCoord,
startmonth, startyear, var)
dummy = dtset[var].copy()
if ('calendar' in dummy['time'].attrs):
del dummy['time'].attrs['calendar']
if ('units' in dummy['time'].attrs):
del dummy['time'].attrs['units']
dummy.time.encoding['units'] = 'days since 2000-01-01'
dummy.time.encoding['calendar'] = 'standard'
for key in dummy.coords:
# print(dummy.coords[key])
if 'Pres_addj' in dummy[key].attrs:
dummy[key].attrs['Pres_addj']= boolean_2_string(dummy[key].attrs['Pres_addj'])
if ('Pres_addj' in dummy.attrs):
print(boolean_2_string(dummy.attrs['Pres_addj']))
dummy.attrs['Pres_addj'] = boolean_2_string(dummy.attrs['Pres_addj'])
make_folders(extract_path_from_filepath(filename))
print('Saving %s pressure coordinate field to file %s' %(var,filename))
if len(dtset['time'])<12:
dummy.to_netcdf(filename_m, mode='w') # ,encoding={'time':{'units':'days since 2000-01-01 00:00:00'}})
else:
dummy.to_netcdf(filename, mode='w') # ,encoding={'time':{'units':'days since 2000-01-01 00:00:00'}})
del dummy
# check_dummy=xr.open_dataarray(filename)
# print(check_dummy['time'].values)
def boolean_2_string(b):
if type(b) is bool:
if b:
return 'True'
else:
return 'False'
else:
return b
#def get_filename_pressure_coordinate_field(case, dtset, endmonth, endyear, model, path_savePressCoord, startmonth,
# startyear, var):
# filename_m = path_savePressCoord + '/%s/%s_%s_%s_%s-%s_%s-%s.nc' % (model, var, model, case, startyear, startmonth, endyear, endmonth)
# filename_m = filename_m.replace(" ", "_")
# filename = path_savePressCoord + '/%s/%s_%s_%s_%s_%s.nc' % (model, var, model, case, startyear, endyear)
# filename = filename.replace(" ", "_")
# return filename, filename_m
def open_pressure_coordinate_field(dtset, var, model, path_savePressCoord):
startyear = dtset['time.year'].min().values
endyear = dtset['time.year'].max().values
argmax=dtset['time'].argmax().values
argmin=dtset['time'].argmin().values
startmonth = dtset['time.month'][argmin].values
endmonth = dtset['time.month'][argmax].values
case = dtset.attrs['case_name']
filename, filename_m = get_filename_pressure_coordinate_field(case, dtset, endmonth, endyear, model, path_savePressCoord,
startmonth, startyear, var)
print('Reading pressure coordinate from file:')
print(filename, filename_m)
#filename = path_savePressCoord + '/%s/%s_%s_%s_%s_%s.nc' % (model, var, model, case, startyear, endyear)
#filename.replace(" ", "_")
print('Checking for %s or %s' %(filename,filename_m))
# if (not dummy.attrs['Pres_addj']):
if not os.path.isfile(filename) and not os.path.isfile(filename_m):
print('File does not exist. Returning the unadjusted dataset')
return dtset, False
else:
if os.path.isfile(filename):
dummy = xr.open_dataset(filename)  # , autoclose=True)
if startmonth!=1 or endmonth!= 12:
#print(dummy.time.values)
dummy = dummy.sel(time = slice(dtset['time'].min(), dtset['time'].max()))#(time=slice(test['time'].min(), test['time'].max()))
else:
dummy = xr.open_dataset(filename_m)#, autoclose=True)
if len(dtset.time)!= len(dummy.time):
dummy = dummy.sel(time=slice(dtset.time.min(), dtset.time.max()))
if (var == 'pressure'):
dtset[var] = dummy[var]
dtset[var].values = dummy[var].values # .copy()
dtset[var].attrs = dummy[var].attrs
dtset[var].attrs['Pres_addj'] = True
del dummy
return dtset, True
def add_variable_info_to_model_info_csv(model_name, df_var_info, var, index_key, value):
var_mod_info_filen = '%s_variable_info.csv' % model_name
if index_key not in df_var_info.index:
df_var_info, var_mod_info_filen = add_index_model_info_csv([index_key], model_name)
if var not in df_var_info:
df_var_info[var]=np.zeros(len(df_var_info))*np.nan
df_var_info.loc[index_key, var] = value
df_var_info.to_csv(var_mod_info_filen+'_save.csv')
df_var_info.to_csv(var_mod_info_filen)
return
def open_model_info_csv(model_name):
var_mod_info_filen = '%s_variable_info.csv' % model_name
if os.path.isfile(var_mod_info_filen):
df_var_info = pd.read_csv(var_mod_info_filen, index_col=0)
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
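# Example (illustrative; df is any DataFrame): temporarily force x_compat
# >>> with plot_params.use('x_compat', True):
# ...     df.plot()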
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j!= 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
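Examples:
--------
A minimal illustrative call (the frame and column names below are made up
for demonstration and are not part of the API):
>>> import numpy as np
>>> from pandas import DataFrame
>>> from pandas.tools.plotting import radviz
>>> df = DataFrame(np.random.rand(30, 3), columns=['a', 'b', 'c'])
>>> df['species'] = np.repeat(['x', 'y', 'z'], 10)
>>> ax = radviz(df, 'species')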
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
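Examples:
--------
A minimal illustrative call (the frame and column names below are made up
for demonstration):
>>> import numpy as np
>>> from pandas import DataFrame
>>> from pandas.tools.plotting import andrews_curves
>>> df = DataFrame(np.random.rand(30, 4), columns=['a', 'b', 'c', 'd'])
>>> df['Name'] = np.repeat(['u', 'v', 'w'], 10)
>>> ax = andrews_curves(df, 'Name')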
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
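Examples:
--------
A minimal illustrative call (the series below is made up for demonstration):
>>> import numpy as np
>>> from pandas import Series
>>> from pandas.tools.plotting import bootstrap_plot
>>> s = Series(np.random.rand(200))
>>> fig = bootstrap_plot(s, size=50, samples=100)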
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
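Examples:
--------
A minimal illustrative call (the series below is made up for demonstration):
>>> import numpy as np
>>> from pandas import Series
>>> from pandas.tools.plotting import lag_plot
>>> s = Series(np.sin(np.linspace(0, 10, 200)))
>>> ax = lag_plot(s, lag=1)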
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
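Examples:
--------
A minimal illustrative call (the series below is made up for demonstration):
>>> import numpy as np
>>> from pandas import Series
>>> from pandas.tools.plotting import autocorrelation_plot
>>> s = Series(np.random.randn(500))
>>> ax = autocorrelation_plot(s)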
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data : Series or DataFrame
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the user should handle the visibility settings...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
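For example (illustrative, with made-up names): ``df.plot(yerr='err')``
uses the column named 'err' of the plotted frame as error values, and
``df.plot(yerr={'A': 0.2, 'B': 0.1})`` pairs each value with the matching
column.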
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
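# LinePlot: standard line plot. When the index carries (or implies) a frequency
# and x_compat is off, plotting is delegated to tsplot for period-aware x axes.
# Stacked plots accumulate positive and negative totals separately.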
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# slightly deceptive name: this decides whether to use the tsplot (period-aware) code path
return not self.x_compat and self.use_index and self._use_dynamic_x()
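# _make_plot picks the time-series path (tsplot over a converted PeriodIndex) or
# the ordinary path (precomputed xticks), then plots each column with its own
# style, color and error bars.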
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
raise ValueError('When stacked is True, each column must be either '
'all positive or all negative. '
'{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
# column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
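# _pos_prior/_neg_prior hold the running stacked totals of positive and negative
# values, so each new column in a stacked plot is drawn on top of the previous ones.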
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
# tsplot resampling may change the data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = ((not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots)
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# TODO: there is probably a better place to check / set this
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
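# AreaPlot: stacked by default; fills between the previous cumulative total and
# the new line, and lowers alpha for unstacked plots so overlaps stay visible.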
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
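# BarPlot: vertical ('bar') or horizontal ('barh') bars with grouped, stacked or
# per-subplot layouts; tickoffset/lim_offset keep tick positions and axis limits
# aligned with the chosen bar placement.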
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
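# HistPlot: subclasses LinePlot but skips its __init__ (no NaN filling); when an
# integer bin count is given, common bin edges are computed from the numeric data.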
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
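# The tests below exercise WoodworkTableAccessor: validation helpers, init and
# schema/name/metadata handling, dtype inference, index behaviour, and selection
# by logical type or semantic tag. The sample_df fixture may be a pandas, Dask,
# or Koalas DataFrame.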
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
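# dtype inference tests: verify the physical dtype Woodwork assigns for each
# combination of underlying values and missing-value sentinel (np.nan, pd.NA, pd.NaT).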
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": pd.Series(["a", pd.NA]),
"str_NA_specified": pd.Series([1, pd.NA], dtype="string"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["str_no_nans"].dtype == "category"
assert df["str_nan"].dtype == "category"
assert df["str_NA"].dtype == "category"
assert df["str_NA_specified"].dtype == "category"
def test_float_dtype_inference_on_init():
df = pd.DataFrame(
{
"floats_no_nans": pd.Series([1.1, 2.2]),
"floats_nan": pd.Series([1.1, np.nan]),
"floats_NA": pd.Series([1.1, pd.NA]),
"floats_nan_specified": pd.Series([1.1, np.nan], dtype="float"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["floats_no_nans"].dtype == "float64"
assert df["floats_nan"].dtype == "float64"
assert df["floats_NA"].dtype == "category"
assert df["floats_nan_specified"].dtype == "float64"
def test_datetime_dtype_inference_on_init():
df = pd.DataFrame(
{
"date_no_nans": pd.Series([pd.to_datetime("2020-09-01")] * 2),
"date_nan": pd.Series([pd.to_datetime("2020-09-01"), np.nan]),
"date_NA": pd.Series([pd.to_datetime("2020-09-01"), pd.NA]),
"date_NaT": pd.Series([pd.to_datetime("2020-09-01"), pd.NaT]),
"date_NA_specified": pd.Series(
[pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]"
),
}
)
df.ww.init()
assert df["date_no_nans"].dtype == "datetime64[ns]"
assert df["date_nan"].dtype == "datetime64[ns]"
assert df["date_NA"].dtype == "datetime64[ns]"
assert df["date_NaT"].dtype == "datetime64[ns]"
assert df["date_NA_specified"].dtype == "datetime64[ns]"
def test_datetime_inference_with_format_param():
df = pd.DataFrame(
{
"index": [0, 1, 2],
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd_special": ["2019~01~01", "2019~01~02", "2019~01~03"],
"mdy_special": pd.Series(
["3~11~2000", "3~12~2000", "3~13~2000"], dtype="string"
),
}
)
df.ww.init(
name="df_name",
logical_types={
"ymd_special": Datetime(datetime_format="%Y~%m~%d"),
"mdy_special": Datetime(datetime_format="%m~%d~%Y"),
"dates": Datetime,
},
time_index="ymd_special",
)
assert df["dates"].dtype == "datetime64[ns]"
assert df["ymd_special"].dtype == "datetime64[ns]"
assert df["mdy_special"].dtype == "datetime64[ns]"
assert df.ww.time_index == "ymd_special"
assert isinstance(df.ww["dates"].ww.logical_type, Datetime)
assert isinstance(df.ww["ymd_special"].ww.logical_type, Datetime)
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
df.ww.set_time_index("mdy_special")
assert df.ww.time_index == "mdy_special"
df = pd.DataFrame(
{
"mdy_special": pd.Series(
["3&11&2000", "3&12&2000", "3&13&2000"], dtype="string"
),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["mdy_special"].dtype == "category"
df.ww.set_types(logical_types={"mdy_special": Datetime(datetime_format="%m&%d&%Y")})
assert df["mdy_special"].dtype == "datetime64[ns]"
df.ww.set_time_index("mdy_special")
assert isinstance(df.ww["mdy_special"].ww.logical_type, Datetime)
assert df.ww.time_index == "mdy_special"
def test_timedelta_dtype_inference_on_init():
df = pd.DataFrame(
{
"delta_no_nans": (
pd.Series([pd.to_datetime("2020-09-01")] * 2)
- pd.to_datetime("2020-07-01")
),
"delta_nan": (
pd.Series([pd.to_datetime("2020-09-01"), np.nan])
- pd.to_datetime("2020-07-01")
),
"delta_NaT": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NaT])
- pd.to_datetime("2020-07-01")
),
"delta_NA_specified": (
pd.Series([pd.to_datetime("2020-09-01"), pd.NA], dtype="datetime64[ns]")
- pd.to_datetime("2020-07-01")
),
}
)
df.ww.init()
assert df["delta_no_nans"].dtype == "timedelta64[ns]"
assert df["delta_nan"].dtype == "timedelta64[ns]"
assert df["delta_NaT"].dtype == "timedelta64[ns]"
assert df["delta_NA_specified"].dtype == "timedelta64[ns]"
def test_sets_category_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
pd.Series(["a", pd.NaT, "c"], name=column_name),
]
logical_types = [
Categorical,
CountryCode,
Ordinal(order=["a", "b", "c"]),
PostalCode,
SubRegionCode,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if isclass(logical_type):
logical_type = logical_type()
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert df.ww.columns[column_name].logical_type == logical_type
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_object_dtype_on_init(latlong_df):
for column_name in latlong_df.columns:
ltypes = {
column_name: LatLong,
}
df = latlong_df.loc[:, [column_name]]
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, LatLong)
assert df[column_name].dtype == LatLong.primary_dtype
df_pandas = to_pandas(df[column_name])
expected_val = (3, 4)
if _is_koalas_dataframe(latlong_df):
expected_val = [3, 4]
assert df_pandas.iloc[-1] == expected_val
def test_sets_string_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["a", "b", "c"], name=column_name),
pd.Series(["a", None, "c"], name=column_name),
pd.Series(["a", np.nan, "c"], name=column_name),
pd.Series(["a", pd.NA, "c"], name=column_name),
]
logical_types = [
Address,
Filepath,
PersonFullName,
IPAddress,
NaturalLanguage,
PhoneNumber,
URL,
]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_boolean_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([True, False, True], name=column_name),
pd.Series([True, None, True], name=column_name),
pd.Series([True, np.nan, True], name=column_name),
pd.Series([True, pd.NA, True], name=column_name),
]
logical_types = [Boolean, BooleanNullable]
for series in series_list:
for logical_type in logical_types:
if series.isnull().any() and logical_type == Boolean:
continue
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_int64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1, 2, 3], name=column_name),
pd.Series([1, None, 3], name=column_name),
pd.Series([1, np.nan, 3], name=column_name),
pd.Series([1, pd.NA, 3], name=column_name),
]
logical_types = [Integer, IntegerNullable, Age, AgeNullable]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
if series.isnull().any() and logical_type in [Integer, Age]:
continue
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_float64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series([1.1, 2, 3], name=column_name),
pd.Series([1.1, None, 3], name=column_name),
pd.Series([1.1, np.nan, 3], name=column_name),
]
logical_types = [Double, AgeFractional]
for series in series_list:
series = series.astype("object")
for logical_type in logical_types:
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_sets_datetime64_dtype_on_init():
column_name = "test_series"
series_list = [
pd.Series(["2020-01-01", "2020-01-02", "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", None, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", np.nan, "2020-01-03"], name=column_name),
pd.Series(["2020-01-01", pd.NA, "2020-01-03"], name=column_name),
pd.Series(
["2020-01-01", pd.NaT, "2020-01-03"], name=column_name, dtype="object"
),
]
logical_type = Datetime
for series in series_list:
series = series.astype("object")
ltypes = {
column_name: logical_type,
}
df = pd.DataFrame(series)
df.ww.init(logical_types=ltypes)
assert isinstance(df.ww.columns[column_name].logical_type, logical_type)
assert df[column_name].dtype == logical_type.primary_dtype
def test_invalid_dtype_casting():
column_name = "test_series"
# Cannot cast a column with pd.NA to Double
series = pd.Series([1.1, pd.NA, 3], name=column_name)
ltypes = {
column_name: Double,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
# Cannot cast Datetime to Double
df = pd.DataFrame({column_name: ["2020-01-01", "2020-01-02", "2020-01-03"]})
df.ww.init(logical_types={column_name: Datetime})
err_msg = (
"Error converting datatype for test_series from type datetime64[ns] to type "
"float64. Please confirm the underlying data is consistent with logical type Double."
)
with pytest.raises(TypeConversionError, match=re.escape(err_msg)):
df.ww.set_types(logical_types={column_name: Double})
# Cannot cast invalid strings to integers
series = pd.Series(["1", "two", "3"], name=column_name)
ltypes = {
column_name: Integer,
}
err_msg = (
"Error converting datatype for test_series from type object to type "
"int64. Please confirm the underlying data is consistent with logical type Integer."
)
df = pd.DataFrame(series)
with pytest.raises(TypeConversionError, match=err_msg):
df.ww.init(logical_types=ltypes)
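# Underlying-index tests: for pandas input, Woodwork keeps the DataFrame index in
# sync with the declared index column; Dask and Koalas inputs are excluded (xfail).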
def test_underlying_index_set_no_index_on_init(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
input_index = pd.Int64Index([99, 88, 77, 66])
schema_df = sample_df.copy()
schema_df.index = input_index.copy()
pd.testing.assert_index_equal(input_index, schema_df.index)
schema_df.ww.init()
assert schema_df.ww.index is None
pd.testing.assert_index_equal(input_index, schema_df.index)
sorted_df = schema_df.ww.sort_values("full_name")
assert sorted_df.ww.index is None
pd.testing.assert_index_equal(pd.Int64Index([88, 77, 99, 66]), sorted_df.index)
def test_underlying_index_set(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
# Sets underlying index at init
schema_df = sample_df.copy()
schema_df.ww.init(index="full_name")
assert "full_name" in schema_df.columns
assert schema_df.index.name is None
assert (schema_df.index == schema_df["full_name"]).all()
# Sets underlying index on update
schema_df = sample_df.copy()
schema_df.ww.init(index="id")
schema_df.ww.set_index("full_name")
assert schema_df.ww.index == "full_name"
assert "full_name" in schema_df.columns
assert (schema_df.index == schema_df["full_name"]).all()
assert schema_df.index.name is None
# confirm removing Woodwork index doesn't change underlying index
schema_df.ww.set_index(None)
assert schema_df.ww.index is None
assert (schema_df.index == schema_df["full_name"]).all()
def test_underlying_index_reset(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
specified_index = pd.Index
unspecified_index = pd.RangeIndex
sample_df.ww.init()
assert type(sample_df.index) == unspecified_index
sample_df.ww.set_index("full_name")
assert type(sample_df.index) == specified_index
copied_df = sample_df.ww.copy()
warning = "Index mismatch between DataFrame and typing information"
with pytest.warns(TypingInfoMismatchWarning, match=warning):
copied_df.ww.reset_index(drop=True, inplace=True)
assert copied_df.ww.schema is None
assert type(copied_df.index) == unspecified_index
sample_df.ww.set_index(None)
assert type(sample_df.index) == specified_index
# Use pandas operation to reset index
reset_df = sample_df.ww.reset_index(drop=True, inplace=False)
assert type(sample_df.index) == specified_index
assert type(reset_df.index) == unspecified_index
sample_df.ww.reset_index(drop=True, inplace=True)
assert type(sample_df.index) == unspecified_index
def test_underlying_index_unchanged_after_updates(sample_df):
if _is_dask_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Dask input")
if _is_koalas_dataframe(sample_df):
pytest.xfail("Setting underlying index is not supported with Koalas input")
sample_df.ww.init(index="full_name")
assert "full_name" in sample_df
assert sample_df.ww.index == "full_name"
assert (sample_df.index == sample_df["full_name"]).all()
copied_df = sample_df.ww.copy()
dropped_df = copied_df.ww.drop("full_name")
assert "full_name" not in dropped_df
assert dropped_df.ww.index is None
assert (dropped_df.index == sample_df["full_name"]).all()
selected_df = copied_df.ww.select("Integer")
assert "full_name" not in dropped_df
assert selected_df.ww.index is None
assert (selected_df.index == sample_df["full_name"]).all()
iloc_df = copied_df.ww.iloc[:, 2:]
assert "full_name" not in iloc_df
assert iloc_df.ww.index is None
assert (iloc_df.index == sample_df["full_name"]).all()
loc_df = copied_df.ww.loc[:, ["id", "email"]]
assert "full_name" not in loc_df
assert loc_df.ww.index is None
assert (loc_df.index == sample_df["full_name"]).all()
subset_df = copied_df.ww[["id", "email"]]
assert "full_name" not in subset_df
assert subset_df.ww.index is None
assert (subset_df.index == sample_df["full_name"]).all()
reset_tags_df = sample_df.ww.copy()
reset_tags_df.ww.reset_semantic_tags("full_name", retain_index_tags=False)
assert reset_tags_df.ww.index is None
assert (reset_tags_df.index == sample_df["full_name"]).all()
remove_tags_df = sample_df.ww.copy()
remove_tags_df.ww.remove_semantic_tags({"full_name": "index"})
assert remove_tags_df.ww.index is None
assert (remove_tags_df.index == sample_df["full_name"]).all()
set_types_df = sample_df.ww.copy()
set_types_df.ww.set_types(
semantic_tags={"full_name": "new_tag"}, retain_index_tags=False
)
assert set_types_df.ww.index is None
assert (set_types_df.index == sample_df["full_name"]).all()
popped_df = sample_df.ww.copy()
popped_df.ww.pop("full_name")
assert popped_df.ww.index is None
assert (popped_df.index == sample_df["full_name"]).all()
def test_accessor_already_sorted(sample_unsorted_df):
if _is_dask_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Dask input")
if _is_koalas_dataframe(sample_unsorted_df):
pytest.xfail("Sorting dataframe is not supported with Koalas input")
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(name="schema", index="id", time_index="signup_date")
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
sorted_df = (
to_pandas(sample_unsorted_df)
.sort_values(["signup_date", "id"])
.set_index("id", drop=False)
)
sorted_df.index.name = None
pd.testing.assert_frame_equal(
sorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
schema_df = sample_unsorted_df.copy()
schema_df.ww.init(
name="schema", index="id", time_index="signup_date", already_sorted=True
)
assert schema_df.ww.time_index == "signup_date"
assert isinstance(
schema_df.ww.columns[schema_df.ww.time_index].logical_type, Datetime
)
unsorted_df = to_pandas(sample_unsorted_df.set_index("id", drop=False))
unsorted_df.index.name = None
pd.testing.assert_frame_equal(
unsorted_df, to_pandas(schema_df), check_index_type=False, check_dtype=False
)
def test_ordinal_with_order(sample_series):
if _is_koalas_series(sample_series) or _is_dask_series(sample_series):
pytest.xfail(
"Fails with Dask and Koalas - ordinal data validation not compatible"
)
ordinal_with_order = Ordinal(order=["a", "b", "c"])
schema_df = pd.DataFrame(sample_series)
schema_df.ww.init(logical_types={"sample_series": ordinal_with_order})
column_logical_type = schema_df.ww.logical_types["sample_series"]
assert isinstance(column_logical_type, Ordinal)
assert column_logical_type.order == ["a", "b", "c"]
schema_df = pd.DataFrame(sample_series)
schema_df.ww.init()
schema_df.ww.set_types(logical_types={"sample_series": ordinal_with_order})
logical_type = schema_df.ww.logical_types["sample_series"]
assert isinstance(logical_type, Ordinal)
assert logical_type.order == ["a", "b", "c"]
def test_ordinal_with_incomplete_ranking(sample_series):
if _is_koalas_series(sample_series) or _is_dask_series(sample_series):
pytest.xfail(
"Fails with Dask and Koalas - ordinal data validation not supported"
)
ordinal_incomplete_order = Ordinal(order=["a", "b"])
error_msg = re.escape(
"Ordinal column sample_series contains values that are not "
"present in the order values provided: ['c']"
)
schema_df = pd.DataFrame(sample_series)
with pytest.raises(ValueError, match=error_msg):
schema_df.ww.init(logical_types={"sample_series": ordinal_incomplete_order})
schema_df.ww.init()
with pytest.raises(ValueError, match=error_msg):
schema_df.ww.set_types(
logical_types={"sample_series": ordinal_incomplete_order}
)
def test_ordinal_with_nan_values():
nan_df = pd.DataFrame(pd.Series(["a", "b", np.nan, "a"], name="nan_series"))
ordinal_with_order = Ordinal(order=["a", "b"])
nan_df.ww.init(logical_types={"nan_series": ordinal_with_order})
column_logical_type = nan_df.ww.logical_types["nan_series"]
assert isinstance(column_logical_type, Ordinal)
assert column_logical_type.order == ["a", "b"]
def test_accessor_with_falsy_column_names(falsy_names_df):
if _is_dask_dataframe(falsy_names_df):
pytest.xfail("Dask DataFrames cannot handle integer column names")
schema_df = falsy_names_df.copy()
schema_df.ww.init(index=0, time_index="")
assert schema_df.ww.index == 0
assert schema_df.ww.time_index == ""
schema_df.ww.set_time_index(None)
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("")
assert schema_df.ww.time_index == ""
popped_col = schema_df.ww.pop("")
assert "" not in schema_df
assert "" not in schema_df.ww.columns
assert schema_df.ww.time_index is None
schema_df.ww.set_index(None)
assert schema_df.ww.index is None
schema_df.ww[""] = popped_col
assert schema_df.ww[""].name == ""
renamed_df = schema_df.ww.rename({0: "col_with_name"})
assert 0 not in renamed_df.columns
assert "col_with_name" in renamed_df.columns
def test_dataframe_methods_on_accessor(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
copied_df = schema_df.ww.copy()
assert schema_df is not copied_df
assert schema_df.ww._schema is not copied_df.ww._schema
assert copied_df.ww.schema == schema_df.ww.schema
pd.testing.assert_frame_equal(to_pandas(schema_df), to_pandas(copied_df))
ltype_dtype = "int64"
new_dtype = "string"
warning = (
"Operation performed by astype has invalidated the Woodwork typing information:\n "
f"dtype mismatch for column id between DataFrame dtype, {new_dtype}, and Integer dtype, {ltype_dtype}.\n "
"Please initialize Woodwork with DataFrame.ww.init"
)
with pytest.warns(TypingInfoMismatchWarning, match=warning):
new_df = schema_df.ww.astype({"id": new_dtype})
assert new_df["id"].dtype == new_dtype
assert new_df.ww.schema is None
assert schema_df.ww.schema is not None
def test_dataframe_methods_on_accessor_new_schema_object(sample_df):
sample_df.ww.init(
index="id",
semantic_tags={"email": "new_tag"},
table_metadata={"contributors": ["user1", "user2"], "created_on": "2/12/20"},
column_metadata={"id": {"important_keys": [1, 2, 3]}},
)
copied_df = sample_df.ww.copy()
assert sample_df.ww.schema == copied_df.ww.schema
assert sample_df.ww._schema is not copied_df.ww._schema
copied_df.ww.metadata["contributors"].append("user3")
assert copied_df.ww.metadata == {
"contributors": ["user1", "user2", "user3"],
"created_on": "2/12/20",
}
assert sample_df.ww.metadata == {
"contributors": ["user1", "user2"],
"created_on": "2/12/20",
}
copied_df.ww.reset_semantic_tags(retain_index_tags=False)
assert copied_df.ww.index is None
assert sample_df.ww.index == "id"
assert copied_df.ww.semantic_tags["email"] == set()
assert sample_df.ww.semantic_tags["email"] == {"new_tag"}
copied_df.ww.columns["id"].metadata["important_keys"].append(4)
assert copied_df.ww.columns["id"].metadata == {"important_keys": [1, 2, 3, 4]}
assert sample_df.ww.columns["id"].metadata == {"important_keys": [1, 2, 3]}
def test_dataframe_methods_on_accessor_inplace(sample_df):
# TODO: Try to find a supported inplace method for Dask, if one exists
if _is_dask_dataframe(sample_df):
pytest.xfail("Dask does not support sort_values or rename inplace.")
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
df_pre_sort = schema_df.copy()
schema_df.ww.sort_values(["full_name"], inplace=True)
assert schema_df.ww.name == "test_schema"
pd.testing.assert_frame_equal(
to_pandas(schema_df), to_pandas(df_pre_sort.sort_values(["full_name"]))
)
warning = "Operation performed by insert has invalidated the Woodwork typing information:\n "
"The following columns in the DataFrame were missing from the typing information: {'new_name'}.\n "
"Please initialize Woodwork with DataFrame.ww.init"
with pytest.warns(TypingInfoMismatchWarning, match=warning):
schema_df.ww.insert(loc=0, column="new_name", value=[1, 2, 3, 4])
assert "new_name" in schema_df.columns
assert schema_df.ww.schema is None
def test_dataframe_methods_on_accessor_returning_series(sample_df):
schema_df = sample_df[["id", "age", "is_registered"]]
schema_df.ww.init(name="test_schema")
dtypes = schema_df.ww.dtypes
assert schema_df.ww.name == "test_schema"
pd.testing.assert_series_equal(dtypes, schema_df.dtypes)
all_series = schema_df.ww.all()
assert schema_df.ww.name == "test_schema"
pd.testing.assert_series_equal(to_pandas(all_series), to_pandas(schema_df.all()))
def test_dataframe_methods_on_accessor_other_returns(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema")
shape = schema_df.ww.shape
assert schema_df.ww.name == "test_schema"
if _is_dask_dataframe(sample_df):
shape = (shape[0].compute(), shape[1])
assert shape == to_pandas(schema_df).shape
assert schema_df.ww.name == "test_schema"
if not _is_dask_dataframe(sample_df):
# keys() not supported with Dask
pd.testing.assert_index_equal(schema_df.ww.keys(), schema_df.keys())
def test_dataframe_methods_on_accessor_to_pandas(sample_df):
if isinstance(sample_df, pd.DataFrame):
pytest.skip("No need to test converting pandas DataFrame to pandas")
sample_df.ww.init(name="woodwork", index="id")
if _is_dask_dataframe(sample_df):
pd_df = sample_df.ww.compute()
elif _is_koalas_dataframe(sample_df):
pd_df = sample_df.ww.to_pandas()
pytest.skip(
"Bug #1071: Woodwork not initialized after to_pandas call with Koalas categorical column"
)
assert isinstance(pd_df, pd.DataFrame)
assert pd_df.ww.index == "id"
assert pd_df.ww.name == "woodwork"
def test_get_subset_df_with_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"age": Double,
"signup_date": Datetime,
},
semantic_tags={"full_name": ["new_tag", "tag2"], "age": "numeric"},
)
schema = schema_df.ww.schema
empty_df = schema_df.ww._get_subset_df_with_schema([])
assert len(empty_df.columns) == 0
assert empty_df.ww.schema is not None
pd.testing.assert_frame_equal(to_pandas(empty_df), to_pandas(schema_df[[]]))
validate_subset_schema(empty_df.ww.schema, schema)
just_index = schema_df.ww._get_subset_df_with_schema(["id"])
assert just_index.ww.index == schema.index
assert just_index.ww.time_index is None
pd.testing.assert_frame_equal(to_pandas(just_index), to_pandas(schema_df[["id"]]))
validate_subset_schema(just_index.ww.schema, schema)
just_time_index = schema_df.ww._get_subset_df_with_schema(["signup_date"])
assert just_time_index.ww.time_index == schema.time_index
assert just_time_index.ww.index is None
pd.testing.assert_frame_equal(
to_pandas(just_time_index), to_pandas(schema_df[["signup_date"]])
)
validate_subset_schema(just_time_index.ww.schema, schema)
transfer_schema = schema_df.ww._get_subset_df_with_schema(["phone_number"])
assert transfer_schema.ww.index is None
assert transfer_schema.ww.time_index is None
pd.testing.assert_frame_equal(
to_pandas(transfer_schema), to_pandas(schema_df[["phone_number"]])
)
validate_subset_schema(transfer_schema.ww.schema, schema)
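# Selection tests: ww.select accepts logical types (classes or strings) and
# semantic tags, and carries over index/time index only when those columns are
# included in the selection.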
def test_select_ltypes_no_match_and_all(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
assert len(schema_df.ww.select(PostalCode).columns) == 0
assert len(schema_df.ww.select(["PostalCode", PhoneNumber]).columns) == 1
all_types = ww.type_system.registered_types
assert len(schema_df.ww.select(exclude=all_types).columns) == 0
df_all_types = schema_df.ww.select(all_types)
pd.testing.assert_frame_equal(to_pandas(df_all_types), to_pandas(schema_df))
assert df_all_types.ww.schema == schema_df.ww.schema
def test_select_ltypes_strings(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_multiple_ltypes = schema_df.ww.select(
["PersonFullName", "email_address", "double", "BooleanNullable", "datetime"]
)
assert len(df_multiple_ltypes.columns) == 7
assert "phone_number" not in df_multiple_ltypes.columns
assert "id" not in df_multiple_ltypes.columns
df_single_ltype = schema_df.ww.select("person_full_name")
assert set(df_single_ltype.columns) == {"full_name"}
def test_select_ltypes_objects(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_multiple_ltypes = schema_df.ww.select(
[PersonFullName, EmailAddress, Double, BooleanNullable, Datetime]
)
assert len(df_multiple_ltypes.columns) == 7
assert "phone_number" not in df_multiple_ltypes.columns
assert "id" not in df_multiple_ltypes.columns
df_single_ltype = schema_df.ww.select(PersonFullName)
assert len(df_single_ltype.columns) == 1
def test_select_ltypes_mixed(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_mixed_ltypes = schema_df.ww.select(["PersonFullName", "email_address", Double])
assert len(df_mixed_ltypes.columns) == 4
assert "phone_number" not in df_mixed_ltypes.columns
def test_select_ltypes_mixed_exclude(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(logical_types=sample_correct_logical_types)
df_mixed_ltypes = schema_df.ww.select(
exclude=["PersonFullName", "email_address", Double]
)
assert len(df_mixed_ltypes.columns) == 12
assert "full_name" not in df_mixed_ltypes.columns
assert "email_address" not in df_mixed_ltypes.columns
assert "double" not in df_mixed_ltypes.columns
assert "double_with_nan" not in df_mixed_ltypes.columns
def test_select_ltypes_table(sample_df, sample_correct_logical_types):
schema_df = sample_df.copy()
schema_df.ww.init(
name="testing",
index="id",
time_index="signup_date",
logical_types=sample_correct_logical_types,
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
},
)
df_no_indices = schema_df.ww.select("phone_number")
assert df_no_indices.ww.index is None
assert df_no_indices.ww.time_index is None
df_with_indices = schema_df.ww.select(["Datetime", "Integer"])
assert df_with_indices.ww.index == "id"
assert df_with_indices.ww.time_index == "signup_date"
df_values = schema_df.ww.select(["PersonFullName"])
assert df_values.ww.name == schema_df.ww.name
assert df_values.ww.columns["full_name"] == schema_df.ww.columns["full_name"]
def test_select_semantic_tags(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
semantic_tags={
"full_name": "tag1",
"email": ["tag2"],
"age": ["numeric", "tag2"],
"phone_number": ["tag3", "tag2"],
"is_registered": "category",
},
time_index="signup_date",
)
df_one_match = schema_df.ww.select("numeric")
assert len(df_one_match.columns) == 6
assert "age" in df_one_match.columns
assert "id" in df_one_match.columns
df_multiple_matches = schema_df.ww.select("tag2")
assert len(df_multiple_matches.columns) == 3
assert "age" in df_multiple_matches.columns
assert "phone_number" in df_multiple_matches.columns
assert "email" in df_multiple_matches.columns
df_multiple_tags = schema_df.ww.select(["numeric", "time_index"])
assert len(df_multiple_tags.columns) == 7
assert "id" in df_multiple_tags.columns
assert "age" in df_multiple_tags.columns
assert "signup_date" in df_multiple_tags.columns
df_overlapping_tags = schema_df.ww.select(["numeric", "tag2"])
assert len(df_overlapping_tags.columns) == 8
assert "id" in df_overlapping_tags.columns
assert "age" in df_overlapping_tags.columns
assert "phone_number" in df_overlapping_tags.columns
assert "email" in df_overlapping_tags.columns
df_common_tags = schema_df.ww.select(["category", "numeric"])
assert len(df_common_tags.columns) == 8
assert "id" in df_common_tags.columns
assert "is_registered" in df_common_tags.columns
assert "age" in df_common_tags.columns
def test_select_semantic_tags_exclude(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
semantic_tags={
"full_name": "tag1",
"email": ["tag2"],
"age": ["numeric", "tag2"],
"phone_number": ["tag3", "tag2"],
"is_registered": "category",
},
time_index="signup_date",
)
df_one_match = schema_df.ww.select(exclude="numeric")
assert len(df_one_match.columns) == 10
assert "age" not in df_one_match.columns
assert "id" not in df_one_match.columns
df_multiple_matches = schema_df.ww.select(exclude="tag2")
assert len(df_multiple_matches.columns) == 13
assert "age" not in df_multiple_matches.columns
assert "phone_number" not in df_multiple_matches.columns
assert "email" not in df_multiple_matches.columns
df_multiple_tags = schema_df.ww.select(exclude=["numeric", "time_index"])
assert len(df_multiple_tags.columns) == 9
assert "id" not in df_multiple_tags.columns
assert "age" not in df_multiple_tags.columns
assert "signup_date" not in df_multiple_tags.columns
df_overlapping_tags = schema_df.ww.select(exclude=["numeric", "tag2"])
assert len(df_overlapping_tags.columns) == 8
assert "id" not in df_overlapping_tags.columns
assert "age" not in df_overlapping_tags.columns
assert "phone_number" not in df_overlapping_tags.columns
assert "email" not in df_overlapping_tags.columns
df_common_tags = schema_df.ww.select(exclude=["category", "numeric"])
assert len(df_common_tags.columns) == 8
assert "id" not in df_common_tags.columns
assert "is_registered" not in df_common_tags.columns
assert "age" not in df_common_tags.columns
def test_select_single_inputs(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
},
)
df_ltype_string = schema_df.ww.select("person_full_name")
assert len(df_ltype_string.columns) == 1
assert "full_name" in df_ltype_string.columns
df_ltype_obj = schema_df.ww.select(IntegerNullable)
assert len(df_ltype_obj.columns) == 2
assert "age" in df_ltype_obj.columns
df_tag_string = schema_df.ww.select("index")
assert len(df_tag_string.columns) == 1
assert "id" in df_tag_string.columns
df_tag_instantiated = schema_df.ww.select("Datetime")
assert len(df_tag_instantiated.columns) == 2
assert "signup_date" in df_tag_instantiated.columns
def test_select_list_inputs(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
"is_registered": "category",
},
)
df_just_strings = schema_df.ww.select(
["PersonFullName", "index", "tag2", "boolean_nullable"]
)
assert len(df_just_strings.columns) == 4
assert "id" in df_just_strings.columns
assert "full_name" in df_just_strings.columns
assert "email" in df_just_strings.columns
assert "is_registered" in df_just_strings.columns
df_mixed_selectors = schema_df.ww.select(
[PersonFullName, "index", "time_index", Integer]
)
assert len(df_mixed_selectors.columns) == 4
assert "id" in df_mixed_selectors.columns
assert "full_name" in df_mixed_selectors.columns
assert "signup_date" in df_mixed_selectors.columns
df_common_tags = schema_df.ww.select(
["category", "numeric", BooleanNullable, Datetime]
)
assert len(df_common_tags.columns) == 9
assert "is_registered" in df_common_tags.columns
assert "age" in df_common_tags.columns
assert "signup_date" in df_common_tags.columns
def test_select_semantic_tags_no_match(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
},
)
assert len(schema_df.ww.select(["doesnt_exist"]).columns) == 0
df_multiple_unused = schema_df.ww.select(
["doesnt_exist", "boolean_nullable", "category", PhoneNumber]
)
assert len(df_multiple_unused.columns) == 3
df_unused_ltype = schema_df.ww.select(
["date_of_birth", "doesnt_exist", PostalCode, Integer]
)
assert len(df_unused_ltype.columns) == 3
def test_select_repetitive(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={
"full_name": PersonFullName,
"email": EmailAddress,
"phone_number": PhoneNumber,
"signup_date": Datetime(datetime_format="%Y-%m-%d"),
},
semantic_tags={
"full_name": ["new_tag", "tag2"],
"age": "numeric",
"signup_date": "date_of_birth",
"email": "tag2",
},
)
df_repeat_tags = schema_df.ww.select(["new_tag", "new_tag"])
assert len(df_repeat_tags.columns) == 1
assert set(df_repeat_tags.columns) == {"full_name"}
df_repeat_ltypes = schema_df.ww.select(["PhoneNumber", PhoneNumber, "phone_number"])
assert len(df_repeat_ltypes.columns) == 1
assert set(df_repeat_ltypes.columns) == {"phone_number"}
def test_select_instantiated_ltype():
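    # Selecting by "Datetime" (string) or the Datetime class matches every Datetime column
    # regardless of its datetime_format, but passing an *instantiated* logical type such as
    # Datetime(datetime_format=...) as a selector is rejected with a TypeError.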
ymd_format = Datetime(datetime_format="%Y~%m~%d")
df = pd.DataFrame(
{
"dates": ["2019/01/01", "2019/01/02", "2019/01/03"],
"ymd": ["2019~01~01", "2019~01~02", "2019~01~03"],
}
)
df.ww.init(logical_types={"ymd": ymd_format, "dates": Datetime})
new_df = df.ww.select("Datetime")
assert len(new_df.columns) == 2
new_df = df.ww.select(Datetime)
assert len(new_df.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
df.ww.select(ymd_format)
def test_select_return_schema(sample_df):
sample_df.ww.init()
# Multiple column matches
df_schema = sample_df.ww.select(include="Unknown", return_schema=True)
assert isinstance(df_schema, TableSchema)
assert len(df_schema.columns) == 2
assert df_schema == sample_df.ww.select(include="Unknown").ww.schema
# Single column match
single_schema = sample_df.ww.select(include="BooleanNullable", return_schema=True)
assert isinstance(single_schema, TableSchema)
assert len(single_schema.columns) == 1
assert single_schema == sample_df.ww.select(include="BooleanNullable").ww.schema
# No matches
empty_schema = sample_df.ww.select(include="PhoneNumber", return_schema=True)
assert isinstance(empty_schema, TableSchema)
assert len(empty_schema.columns) == 0
@pytest.mark.parametrize(
"ww_type, pandas_type",
[
(["Integer", "IntegerNullable"], "int"),
(["Double"], "float"),
(["Datetime"], "datetime"),
(["Unknown", "EmailAddress", "URL", "IPAddress"], "string"),
(["Categorical"], "category"),
(["Boolean", "BooleanNullable"], "boolean"),
],
)
def test_select_retains_column_order(ww_type, pandas_type, sample_df):
if _is_koalas_dataframe(sample_df) and pandas_type in ["category", "string"]:
pytest.skip("Koalas stores categories as strings")
sample_df.ww.init()
ww_schema_column_order = [
x for x in sample_df.ww.select(ww_type, return_schema=True).columns.keys()
]
pandas_column_order = [
x for x in sample_df.select_dtypes(include=pandas_type).columns
]
assert ww_schema_column_order == pandas_column_order
def test_select_include_and_exclude_error(sample_df):
sample_df.ww.init()
err_msg = "Cannot specify values for both 'include' and 'exclude' in a single call."
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select(include="Integer", exclude="Double")
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select(include=[], exclude=[])
def test_select_no_selectors_error(sample_df):
sample_df.ww.init()
err_msg = "Must specify values for either 'include' or 'exclude'."
with pytest.raises(ValueError, match=err_msg):
sample_df.ww.select()
def test_accessor_set_index(sample_df):
sample_df.ww.init()
sample_df.ww.set_index("id")
assert sample_df.ww.index == "id"
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert (sample_df.index == sample_df["id"]).all()
sample_df.ww.set_index("full_name")
assert sample_df.ww.index == "full_name"
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert (sample_df.index == sample_df["full_name"]).all()
sample_df.ww.set_index(None)
assert sample_df.ww.index is None
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
# Check that underlying index doesn't get reset when Woodwork index is removed
assert (sample_df.index == sample_df["full_name"]).all()
def test_accessor_set_index_errors(sample_df):
sample_df.ww.init()
error = "Specified index column `testing` not found in TableSchema."
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.set_index("testing")
if isinstance(sample_df, pd.DataFrame):
        # Index uniqueness is not validated for Dask/Koalas
error = "Index column must be unique"
with pytest.raises(LookupError, match=error):
sample_df.ww.set_index("age")
def test_set_types(sample_df):
sample_df.ww.init(index="full_name", time_index="signup_date")
original_df = sample_df.ww.copy()
sample_df.ww.set_types()
assert original_df.ww.schema == sample_df.ww.schema
pd.testing.assert_frame_equal(to_pandas(original_df), to_pandas(sample_df))
sample_df.ww.set_types(logical_types={"is_registered": "IntegerNullable"})
assert sample_df["is_registered"].dtype == "Int64"
sample_df.ww.set_types(
semantic_tags={"signup_date": ["new_tag"]},
logical_types={"full_name": "Categorical"},
retain_index_tags=False,
)
assert sample_df.ww.index is None
assert sample_df.ww.time_index is None
def test_set_types_errors(sample_df):
sample_df.ww.init(index="full_name")
error = "String invalid is not a valid logical type"
with pytest.raises(ValueError, match=error):
sample_df.ww.set_types(logical_types={"id": "invalid"})
if isinstance(sample_df, pd.DataFrame):
# Dask does not error on invalid type conversion until compute
# Koalas does conversion and fills values with NaN
error = (
"Error converting datatype for email from type string "
"to type float64. Please confirm the underlying data is consistent with "
"logical type Double."
)
with pytest.raises(TypeConversionError, match=error):
sample_df.ww.set_types(logical_types={"email": "Double"})
error = re.escape(
"Cannot add 'index' tag directly for column email. To set a column as the index, "
"use DataFrame.ww.set_index() instead."
)
with pytest.raises(ValueError, match=error):
sample_df.ww.set_types(semantic_tags={"email": "index"})
def test_pop(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(semantic_tags={"age": "custom_tag"})
original_schema = schema_df.ww.schema
popped_series = schema_df.ww.pop("age")
assert isinstance(popped_series, type(sample_df["age"]))
assert popped_series.ww.semantic_tags == {"custom_tag", "numeric"}
pd.testing.assert_series_equal(
to_pandas(popped_series),
pd.Series([pd.NA, 33, 33, 57], dtype="Int64", name="age"),
)
assert isinstance(popped_series.ww.logical_type, IntegerNullable)
assert "age" not in schema_df.columns
assert "age" not in schema_df.ww.columns
assert "age" not in schema_df.ww.logical_types.keys()
assert "age" not in schema_df.ww.semantic_tags.keys()
assert schema_df.ww.schema == original_schema.get_subset_schema(
list(schema_df.columns)
)
schema_df = sample_df.copy()
schema_df.ww.init(
name="table",
logical_types={"age": IntegerNullable},
semantic_tags={"age": "custom_tag"},
use_standard_tags=False,
)
popped_series = schema_df.ww.pop("age")
assert popped_series.ww.semantic_tags == {"custom_tag"}
def test_pop_index(sample_df):
sample_df.ww.init(index="id", name="df_name")
assert sample_df.ww.index == "id"
id_col = sample_df.ww.pop("id")
assert sample_df.ww.index is None
assert "index" in id_col.ww.semantic_tags
def test_pop_error(sample_df):
sample_df.ww.init(
name="table",
logical_types={"age": IntegerNullable},
semantic_tags={"age": "custom_tag"},
use_standard_tags=True,
)
with pytest.raises(
ColumnNotPresentError, match="Column with name 'missing' not found in DataFrame"
):
sample_df.ww.pop("missing")
def test_accessor_drop(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
single_input_df = schema_df.ww.drop("is_registered")
assert len(single_input_df.ww.columns) == (len(schema_df.columns) - 1)
assert "is_registered" not in single_input_df.ww.columns
assert (
to_pandas(schema_df)
.drop("is_registered", axis="columns")
.equals(to_pandas(single_input_df))
)
list_input_df = schema_df.ww.drop(["is_registered"])
assert len(list_input_df.ww.columns) == (len(schema_df.columns) - 1)
assert "is_registered" not in list_input_df.ww.columns
assert (
to_pandas(schema_df)
.drop("is_registered", axis="columns")
.equals(to_pandas(list_input_df))
)
# should be equal to the single input example above
assert single_input_df.ww.schema == list_input_df.ww.schema
assert to_pandas(single_input_df).equals(to_pandas(list_input_df))
multiple_list_df = schema_df.ww.drop(["age", "full_name", "is_registered"])
assert len(multiple_list_df.ww.columns) == (len(schema_df.columns) - 3)
assert "is_registered" not in multiple_list_df.ww.columns
assert "full_name" not in multiple_list_df.ww.columns
assert "age" not in multiple_list_df.ww.columns
assert (
to_pandas(schema_df)
.drop(["is_registered", "age", "full_name"], axis="columns")
.equals(to_pandas(multiple_list_df))
)
# Drop the same columns in a different order and confirm resulting DataFrame column order doesn't change
different_order_df = schema_df.ww.drop(["is_registered", "age", "full_name"])
assert different_order_df.ww.schema == multiple_list_df.ww.schema
assert to_pandas(multiple_list_df).equals(to_pandas(different_order_df))
def test_accessor_drop_inplace(sample_df):
sample_df.ww.init()
inplace_df = sample_df.copy()
inplace_df.ww.init()
if _is_dask_dataframe(sample_df):
error = "Drop inplace not supported for Dask"
with pytest.raises(ValueError, match=error):
inplace_df.ww.drop(["is_registered"], inplace=True)
elif _is_koalas_dataframe(sample_df):
error = "Drop inplace not supported for Koalas"
with pytest.raises(ValueError, match=error):
inplace_df.ww.drop(["is_registered"], inplace=True)
else:
inplace_df.ww.drop(["is_registered"], inplace=True)
assert len(inplace_df.ww.columns) == (len(sample_df.columns) - 1)
assert "is_registered" not in inplace_df.ww.columns
assert sample_df.drop("is_registered", axis="columns").equals(inplace_df)
def test_accessor_drop_indices(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
assert sample_df.ww.index == "id"
assert sample_df.ww.time_index == "signup_date"
dropped_index_df = sample_df.ww.drop("id")
assert "id" not in dropped_index_df.ww.columns
assert dropped_index_df.ww.index is None
assert dropped_index_df.ww.time_index == "signup_date"
dropped_time_index_df = sample_df.ww.drop(["signup_date"])
assert "signup_date" not in dropped_time_index_df.ww.columns
assert dropped_time_index_df.ww.time_index is None
assert dropped_time_index_df.ww.index == "id"
def test_accessor_drop_errors(sample_df):
sample_df.ww.init()
error = re.escape("Column(s) '['not_present']' not found in DataFrame")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop("not_present")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop(["age", "not_present"])
error = re.escape("Column(s) '['not_present1', 4]' not found in DataFrame")
with pytest.raises(ColumnNotPresentError, match=error):
sample_df.ww.drop(["not_present1", 4])
def test_accessor_rename(sample_df):
table_metadata = {"table_info": "this is text"}
id_description = "the id of the row"
id_origin = "base"
sample_df.ww.init(
index="id",
time_index="signup_date",
table_metadata=table_metadata,
column_descriptions={"id": id_description},
column_origins={"id": id_origin},
semantic_tags={"age": "test_tag"},
logical_types={"age": Double},
)
original_df = sample_df.ww.copy()
new_df = sample_df.ww.rename({"age": "birthday"})
assert to_pandas(sample_df.rename(columns={"age": "birthday"})).equals(
to_pandas(new_df)
)
# Confirm original dataframe hasn't changed
assert to_pandas(sample_df).equals(to_pandas(original_df))
assert sample_df.ww.schema == original_df.ww.schema
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("birthday")
pd.testing.assert_series_equal(
to_pandas(original_df["age"]), to_pandas(new_df["birthday"]), check_names=False
)
# confirm that metadata and descriptions are there
assert new_df.ww.metadata == table_metadata
assert new_df.ww.columns["id"].description == id_description
assert new_df.ww.columns["id"].origin == id_origin
old_col = sample_df.ww.columns["age"]
new_col = new_df.ww.columns["birthday"]
assert old_col.logical_type == new_col.logical_type
assert old_col.semantic_tags == new_col.semantic_tags
new_df = sample_df.ww.rename({"age": "full_name", "full_name": "age"})
pd.testing.assert_series_equal(
to_pandas(original_df["age"]), to_pandas(new_df["full_name"]), check_names=False
)
pd.testing.assert_series_equal(
to_pandas(original_df["full_name"]), to_pandas(new_df["age"]), check_names=False
)
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("full_name")
assert original_df.columns.get_loc("full_name") == new_df.columns.get_loc("age")
def test_accessor_rename_inplace(sample_df):
table_metadata = {"table_info": "this is text"}
id_description = "the id of the row"
id_origin = "base"
sample_df.ww.init(
index="id",
time_index="signup_date",
table_metadata=table_metadata,
column_descriptions={"id": id_description},
column_origins={"id": id_origin},
semantic_tags={"age": "test_tag"},
logical_types={"age": Double},
)
original_df = sample_df.ww.copy()
inplace_df = sample_df.ww.copy()
if _is_dask_dataframe(sample_df):
error = "Rename inplace not supported for Dask"
with pytest.raises(ValueError, match=error):
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
elif _is_koalas_dataframe(sample_df):
error = "Rename inplace not supported for Koalas"
with pytest.raises(ValueError, match=error):
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
else:
inplace_df.ww.rename({"age": "birthday"}, inplace=True)
assert original_df.columns.get_loc("age") == inplace_df.columns.get_loc(
"birthday"
)
pd.testing.assert_series_equal(
to_pandas(original_df["age"]),
to_pandas(inplace_df["birthday"]),
check_names=False,
)
# confirm that metadata and descriptions are there
assert inplace_df.ww.metadata == table_metadata
assert inplace_df.ww.columns["id"].description == id_description
assert inplace_df.ww.columns["id"].origin == id_origin
old_col = sample_df.ww.columns["age"]
new_col = inplace_df.ww.columns["birthday"]
assert old_col.logical_type == new_col.logical_type
assert old_col.semantic_tags == new_col.semantic_tags
new_df = sample_df.ww.copy()
new_df.ww.rename({"age": "full_name", "full_name": "age"}, inplace=True)
pd.testing.assert_series_equal(
to_pandas(original_df["age"]),
to_pandas(new_df["full_name"]),
check_names=False,
)
pd.testing.assert_series_equal(
to_pandas(original_df["full_name"]),
to_pandas(new_df["age"]),
check_names=False,
)
assert original_df.columns.get_loc("age") == new_df.columns.get_loc("full_name")
assert original_df.columns.get_loc("full_name") == new_df.columns.get_loc("age")
def test_accessor_rename_indices(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
renamed_df = sample_df.ww.rename(
{"id": "renamed_index", "signup_date": "renamed_time_index"}
)
assert "id" not in renamed_df.columns
assert "signup_date" not in renamed_df.columns
assert "renamed_index" in renamed_df.columns
assert "renamed_time_index" in renamed_df.columns
if isinstance(sample_df, pd.DataFrame):
# underlying index not set for Dask/Koalas
assert all(renamed_df.index == renamed_df["renamed_index"])
assert renamed_df.ww.index == "renamed_index"
assert renamed_df.ww.time_index == "renamed_time_index"
def test_accessor_schema_properties(sample_df):
sample_df.ww.init(index="id", time_index="signup_date")
schema_properties = [
"logical_types",
"semantic_tags",
"index",
"time_index",
"use_standard_tags",
]
for schema_property in schema_properties:
prop_from_accessor = getattr(sample_df.ww, schema_property)
prop_from_schema = getattr(sample_df.ww.schema, schema_property)
assert prop_from_accessor == prop_from_schema
# Assumes we don't have setters for any of these attributes
error = "can't set attribute"
with pytest.raises(AttributeError, match=error):
setattr(sample_df.ww, schema_property, "new_value")
def test_sets_koalas_option_on_init(sample_df_koalas):
if ks:
ks.set_option("compute.ops_on_diff_frames", False)
sample_df_koalas.ww.init()
assert ks.get_option("compute.ops_on_diff_frames") is True
def test_setitem_invalid_input(sample_df):
df = sample_df.copy()
df.ww.init(index="id", time_index="signup_date")
error_msg = "New column must be of Series type"
with pytest.raises(ValueError, match=error_msg):
df.ww["test"] = [1, 2, 3]
error_msg = "Cannot reassign index. Change column name and then use df.ww.set_index to reassign index."
with pytest.raises(KeyError, match=error_msg):
df.ww["id"] = df.id
error_msg = "Cannot reassign time index. Change column name and then use df.ww.set_time_index to reassign time index."
with pytest.raises(KeyError, match=error_msg):
df.ww["signup_date"] = df.signup_date
def test_setitem_indexed_column_on_unindexed_dataframe(sample_df):
sample_df.ww.init()
col = sample_df.ww.pop("id")
col.ww.add_semantic_tags(semantic_tags="index")
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == {"numeric"}
def test_setitem_indexed_column_on_indexed_dataframe(sample_df):
sample_df.ww.init()
sample_df.ww.set_index("id")
col = sample_df.ww.pop("id")
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == {"numeric"}
sample_df.ww.init(logical_types={"email": "Categorical"})
sample_df.ww.set_index("id")
col = sample_df.ww.pop("email")
col.ww.add_semantic_tags(semantic_tags="index")
warning = 'Cannot add "index" tag on email directly to the DataFrame. The "index" tag has been removed from email. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["email"] = col
assert sample_df.ww.index == "id"
assert sample_df.ww.semantic_tags["email"] == {"category"}
def test_setitem_indexed_column_on_unindexed_dataframe_no_standard_tags(sample_df):
sample_df.ww.init()
col = sample_df.ww.pop("id")
col.ww.init(semantic_tags="index", use_standard_tags=False)
warning = 'Cannot add "index" tag on id directly to the DataFrame. The "index" tag has been removed from id. To set this column as a Woodwork index, please use df.ww.set_index'
with pytest.warns(IndexTagRemovedWarning, match=warning):
sample_df.ww["id"] = col
assert sample_df.ww.index is None
assert ww.is_schema_valid(sample_df, sample_df.ww.schema)
assert sample_df.ww["id"].ww.semantic_tags == set()
def test_setitem_different_name(sample_df):
df = sample_df.copy()
df.ww.init()
new_series = pd.Series([1, 2, 3, 4], name="wrong", dtype="float")
if _is_koalas_dataframe(sample_df):
new_series = ks.Series(new_series)
# Assign series with name `wrong` to existing column with name `id`
df.ww["id"] = new_series
assert df.ww["id"].name == "id"
assert "id" in df.ww.columns
assert "wrong" not in df.ww.columns
assert "wrong" not in df.columns
new_series2 = pd.Series([1, 2, 3, 4], name="wrong2", dtype="float")
if _is_koalas_dataframe(sample_df):
new_series2 = ks.Series(new_series2)
# Assign series with name `wrong2` to new column with name `new_col`
df.ww["new_col"] = new_series2
assert df.ww["new_col"].name == "new_col"
assert "new_col" in df.ww.columns
assert "wrong2" not in df.ww.columns
assert "wrong2" not in df.columns
def test_setitem_new_column(sample_df):
df = sample_df.copy()
df.ww.init(use_standard_tags=False)
new_series = pd.Series([1, 2, 3, 4])
if _is_koalas_dataframe(sample_df):
new_series = ks.Series(new_series)
dtype = "int64"
df.ww["test_col2"] = new_series
assert "test_col2" in df.columns
assert "test_col2" in df.ww._schema.columns.keys()
assert isinstance(df.ww["test_col2"].ww.logical_type, Integer)
assert df.ww["test_col2"].ww.use_standard_tags is True
assert df.ww["test_col2"].ww.semantic_tags == {"numeric"}
assert df.ww["test_col2"].name == "test_col2"
assert df.ww["test_col2"].dtype == dtype
new_series = pd.Series([1, 2, 3], dtype="float")
if _is_koalas_dataframe(sample_df):
new_series = ks.Series(new_series)
new_series = init_series(
new_series,
logical_type=Double,
use_standard_tags=False,
semantic_tags={"test_tag"},
)
df.ww["test_col3"] = new_series
assert "test_col3" in df.ww.columns
assert isinstance(df.ww["test_col3"].ww.logical_type, Double)
assert df.ww["test_col3"].ww.use_standard_tags is False
assert df.ww["test_col3"].ww.semantic_tags == {"test_tag"}
assert df.ww["test_col3"].name == "test_col3"
assert df.ww["test_col3"].dtype == "float"
# Standard tags and no logical type
df = sample_df.copy()
df.ww.init(use_standard_tags=True)
new_series = | pd.Series(["new", "column", "inserted"], name="test_col") | pandas.Series |
# script for preparing necessary data for single tasks
import os
os.environ["PYTHONWARNINGS"] = "ignore"
import json
import time
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from utils.data import Dataset, create_adult_dataset, create_compas_dataset, create_titanic_dataset, create_communities_dataset, create_german_dataset, create_bank_dataset
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from imblearn.over_sampling import SMOTE
# define enum
target_metrics = {
"acc": 0,
"f1": 1
}
# prepare datasets for parameter searching
def prepare_datasets():
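    # For each dataset: load it, drop rows with any missing values, reset the index, and
    # flatten the target column into a 1-D numpy array, then store the cleaned Dataset in
    # `results`.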
results = {
"adult": None,
"compas": None,
"titanic": None,
"communities": None,
"german": None,
"bank": None
}
# compas dataset
c_data = create_compas_dataset()
tmp_concat = pd.concat([c_data.X, pd.DataFrame(c_data.y, columns=["_TARGET_"])], axis=1)
tmp_concat.dropna(inplace=True)
tmp_concat.reset_index(drop=True, inplace=True)
c_data.X = tmp_concat.drop(columns=["_TARGET_"]).copy()
c_data.y = tmp_concat["_TARGET_"].copy().to_numpy().ravel()
results["compas"] = c_data
# adult dataset
a_data = create_adult_dataset()
tmp_concat = pd.concat([a_data.X, pd.DataFrame(a_data.y, columns=["_TARGET_"])], axis=1)
tmp_concat.dropna(inplace=True)
tmp_concat.reset_index(drop=True, inplace=True)
a_data.X = tmp_concat.drop(columns=["_TARGET_"]).copy()
a_data.y = tmp_concat["_TARGET_"].copy().to_numpy().ravel()
results["adult"] = a_data
# titanic dataset
t_data = create_titanic_dataset()
tmp_concat = pd.concat([t_data.X, | pd.DataFrame(t_data.y, columns=["_TARGET_"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import fileinput
import json
from scipy.stats import beta
import matplotlib.pyplot as plt
import re
import networkx as nx
import math
from scipy.stats import wilcoxon
from statistics import mean
from scipy.stats import pearsonr
# from cpt_valuation import evaluateProspectVals
class HumanDecisionModels:
def __init__(self,teamId,directory):
#Constants
self.numQuestions = 45
self.trainingSetSize = 30
self.testSetSize = 15
self.numAgents = 4
self.numCentralityReports = 9
self.c = 4
self.e = -1
self.z = -1
# Other Parameters
self.influenceMatrixIndex = 0
self.machineUseCount = [-1, -1, -1, -1]
self.firstMachineUsage = [-1, -1, -1, -1]
# Preloading of the data
eventLog = pd.read_csv(directory+"event_log.csv", sep=',',quotechar="|", names=["id","event_type","event_content","timestamp","completed_task_id","sender_subject_id","receiver_subject_id","session_id","sender","receiver","extra_data"])
        teamSubjects = pd.read_csv(directory+"team_has_subject.csv",sep=',',quotechar="|",names=["id","teamId","sender_subject_id"]).drop('id', axis=1)
elNoMessage = eventLog[(eventLog['event_type'] == "TASK_ATTRIBUTE")]
elNoMessage["sender_subject_id"] = | pd.to_numeric(elNoMessage["sender_subject_id"]) | pandas.to_numeric |
import numpy as np
import matplotlib.pyplot as plt
from numpy import array,identity,diagonal
import os
import numpy
import pandas as pd
import sys
import random
import math
#from scipy.linalg import svd
from math import sqrt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from random import randrange
import operator
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import Imputer
import re, string, unicodedata
import nltk
import warnings
from itertools import combinations
from sklearn.metrics.pairwise import pairwise_distances
from nltk.corpus import stopwords
import bayes as b1
import nn as nn1
import projections as pro1
import lsh as lsh1
warnings.filterwarnings("ignore")
np.set_printoptions(threshold=sys.maxsize)
prior={}
dic1={}
dic2={}
testset=[]
trainset=[]
trainlabel=[]
testlabel=[]
prior={}
train=[]
labels=[]
def F1_score_1(testlabel,predictions):
    # manual binary F1: accumulate the confusion-matrix counts over all samples,
    # then compute precision/recall once, guarding against division by zero
    false_negative=0
    false_positive=0
    true_negative=0
    true_positive=0
    for i in range(len(testlabel)):
        if testlabel[i]!=predictions[i]:
            if predictions[i]==0:
                false_negative=false_negative+1
            else:
                false_positive=false_positive+1
        else:
            if predictions[i]==0:
                true_negative=true_negative+1
            else:
                true_positive=true_positive+1
    precision=true_positive/(true_positive+false_positive) if (true_positive+false_positive)>0 else 0
    recall=true_positive/(true_positive+false_negative) if (true_positive+false_negative)>0 else 0
    f1=2*precision*recall/(precision+recall) if (precision+recall)>0 else 0
    return f1
def F1_score(testlabel,predictions):
return ((f1_score(testlabel, predictions, average='macro')),(f1_score(testlabel, predictions, average='micro')))
def cross_validation_k(train,labels,k):
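    # Build stratified k-fold index lists: group row indices by class label, split each
    # class's indices into k folds of equal size, then merge the i-th fold of every class.
    # (Note that the hard-coded k=10 below overrides the k argument.)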
k=10
#global train,labels
classes={}
index=0
for labelinst in labels:
#print(labelinst)
if labelinst[0] in classes:
classes[labelinst[0]].add(index)
else:
classes[labelinst[0]] = {index}
index=index+1
fold_classes_list={}
for label in classes:
l=len(list(classes[label]))
dataset_copy=list(classes[label])
dataset_split = list()
fold_size = (int(l / k))
for i in range(k):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
#print(dataset_split)
fold_classes_list[label]=dataset_split
#print(fold_classes_list[0])
list_k_fold=[0 for i in range(k)]
list_k_fold1=[0 for i in range(k)]
for i in range(k):
list_small=[]
for label in fold_classes_list:
list_small.extend(fold_classes_list[label][i])
list_k_fold[i]=list(list_small)
#print(list_k_fold)
return list_k_fold
def testing_dolphin_pubmed(testfile,labelfile):
print("FOR DOLPHIN DATA SET: ")
#f2.write(str("DOLPHIN DATA SET"))
train = pd.read_csv(testfile,delimiter=' ',header=None)
labels=pd.read_csv(labelfile,delimiter=' ',header=None)
train=np.array(train)
labels=np.array(labels)
predictions,acc=predictions_calculate(train,labels)
predictions,acc=priors(dic)
print("Our bayes classifier accuracy is : ",end=" ")
print(acc)
a,b=F1_score(labels,predictions)
print("Our bayes classifier F1 score (macro and micro) are : ",end=" ")
print(a,b)
    predictions,acc=knn1()
    print("Our KNN classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Our KNN classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
    predictions,acc=scikit_knn()
    print("Scikit-learn KNN classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Scikit-learn KNN classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
    predictions,acc=sklearn_bayes()
    print("Scikit-learn bayes classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Scikit-learn bayes classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
def testing_twitter(testfile,labelfile):
print("FOR TWITTER DATA SET: ")
train=[]
labels=[]
train = pd.read_csv(testfile,header=None)
labels=pd.read_csv(labelfile,header=None)
bag_of_words = set()
finalmat = []
words1=set()
for i,sentence in train.iterrows():
text = {}
for word in sentence[0].strip().split():
if word not in stopwords.words('english'):
if word in text:
text[word] += 1
else:
text[word]=1
finalmat.append(text)
bag_of_words.update(text)
#print(bag_of_words)
mat = [[(text[word] if word in text else 0) for word in bag_of_words] for text in finalmat]
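    # mat is a dense document-term count matrix: one row per tweet, one column per
    # vocabulary word (stopwords removed), holding that word's count in the tweet.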
train=np.array(mat)
labels=np.array(labels)
#calc_mean_stddev(train,dic)
predictions,acc=predictions_calculate(train,labels)
predictions,acc=priors(dic)
print("Our bayes classifier accuracy is : ",end=" ")
print(acc)
a,b=F1_score(labels,predictions)
print("Our bayes classifier F1 score (macro and micro) are : ",end=" ")
print(a,b)
    predictions,acc=knn1()
    print("Our KNN classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Our KNN classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
    predictions,acc=scikit_knn()
    print("Scikit-learn KNN classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Scikit-learn KNN classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
    predictions,acc=sklearn_bayes()
    print("Scikit-learn bayes classifier accuracy is : ",end=" ")
    print(acc)
    a,b=F1_score(labels,predictions)
    print("Scikit-learn bayes classifier F1 score (macro and micro) are : ",end=" ")
    print(a,b)
# f.write("Bayes,KNN,Sklearn_knn,sklearn_bayes")
# print("Bayes,KNN,Sklearn_knn,sklearn_bayes:")
# f2.write(str("F1 MACRO SCORE:")+str(f1_macro_average)+str(" F1 MICRO SCORE:")+str(f1_micro_average)+str(" ACCURACY:")+str(accuracy_average))
# print(str("F1 MACRO SCORE:")+str(f1_macro_average)+str(" F1 MICRO SCORE:")+str(f1_micro_average)+str(" ACCURACY:")+str(accuracy_average))
# f2.write(str("\n"))
if __name__=='__main__':
array_of_arguments=sys.argv
testdata_path=array_of_arguments[2]
testlabel_path=array_of_arguments[4]
strng=array_of_arguments[6]
current_directory = os.getcwd()
#strng="twitter"
#strng="pubmed"
#f2=open('task3.txt','w')
if strng=="dolphins" or strng=="pubmed":
# f=open("task_3.txt",'w')
# f11=open("task_4.txt",'w')
prior={}
dic1={}
dic2={}
trainset=[]
trainlabel=[]
testset = pd.read_csv(testdata_path,delimiter=' ',header=None)
testlabel=pd.read_csv(testlabel_path,delimiter=' ',header=None)
testset=np.array(testset)
testlabel=np.array(testlabel)
prior={}
R=[]
train=[]
labels=[]
if strng=="dolphins":
print("FOR DOLPHIN DATA SET: ")
final_directory = os.path.join(current_directory, r'dolphins_D_matrices')
if not os.path.exists(final_directory):
os.makedirs(final_directory)
final_directory_out = os.path.join(current_directory, r'output_plots')
if not os.path.exists(final_directory_out):
os.makedirs(final_directory_out)
f=open("task_3_dolphin.txt",'w')
f11=open("task_4_dolphin.txt",'w')
f.write("********************DOLPHIN DATASET***************************************")
#f2.write(str("DOLPHIN DATA SET\n"))
f11.write("********************DOLPHIN DATASET***************************************")
train = pd.read_csv("dolphins.csv",delimiter=' ',header=None)
labels= | pd.read_csv("dolphins_label.csv",delimiter=' ',header=None) | pandas.read_csv |
import pandas as pd
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy import stats
df_all = pd.read_csv("/mnt/nadavrap-students/STS/data/imputed_data2.csv")
print(df_all.columns.tolist())
print (df_all.info())
df_all = df_all.replace({'MtOpD':{False:0, True:1}})
df_all = df_all.replace({'Complics':{False:0, True:1}})
mask_reop = df_all['Reoperation'] == 'Reoperation'
df_reop = df_all[mask_reop]
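# split the imputed dataset into one dataframe per surgery year (2010-2019)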
mask = df_all['surgyear'] == 2010
df_2010 = df_all[mask]
mask = df_all['surgyear'] == 2011
df_2011 = df_all[mask]
mask = df_all['surgyear'] == 2012
df_2012 = df_all[mask]
mask = df_all['surgyear'] == 2013
df_2013 = df_all[mask]
mask = df_all['surgyear'] == 2014
df_2014 = df_all[mask]
mask = df_all['surgyear'] == 2015
df_2015 = df_all[mask]
mask = df_all['surgyear'] == 2016
df_2016 = df_all[mask]
mask = df_all['surgyear'] == 2017
df_2017 = df_all[mask]
mask = df_all['surgyear'] == 2018
df_2018 = df_all[mask]
mask = df_all['surgyear'] == 2019
df_2019 = df_all[mask]
avg_hospid = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
from numpy.core.numeric import zeros_like
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn-poster")
# I hate this too but it allows everything to use the same helper functions.
sys.path.insert(0, "TP_model")
from helper_functions import read_in_NNDSS
from Reff_constants import *
def read_in_posterior(date):
"""
read in samples from posterior from inference
"""
df = pd.read_hdf(
"results/"
+ date
+ "/soc_mob_posterior"
+ date
+ ".h5",
key="samples"
)
return df
def read_in_google(Aus_only=True, local=True, moving=False, moving_window=7):
"""
Read in the Google data set
"""
if local:
if type(local) == str:
df = pd.read_csv(local, parse_dates=["date"])
elif type(local) == bool:
local = "data/Global_Mobility_Report.csv"
df = pd.read_csv(local, parse_dates=["date"])
else:
# Download straight from the web
df = pd.read_csv(
"https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv",
parse_dates=["date"],
)
# Make it save automatically.
df.to_csv("data/Global_Mobility_Report.csv", index=False)
if Aus_only:
df = df.loc[df.country_region_code == "AU"]
# Change state column to state initials
df["state"] = df.sub_region_1.map(
lambda x: states_initials[x] if not pd.isna(x) else "AUS"
)
df = df.loc[df.sub_region_2.isna()]
if moving:
# generate moving average columns in reverse
df = df.sort_values(by="date")
mov_values = []
for val in value_vars:
mov_values.append(val[:-29] + "_7days")
# df[mov_values[-1]] = df.groupby(["state"])[val].transform(
# lambda x: x[::-1].rolling(moving_window, 1, center=True).mean()[::-1]
# ) # minimumnumber of 1
# # minimum of moving_window days for std, forward fill the rest
# df[mov_values[-1] + "_std"] = df.groupby(["state"])[val].transform(
# lambda x: x[::-1].rolling(moving_window, moving_window, center=True).std()[::-1]
# )
# MA was taken in reverse, what about when we do it normally?
df[mov_values[-1]] = df.groupby(["state"])[val].transform(
lambda x: x.rolling(moving_window, 1, center=True).mean()
) # minimumnumber of 1
# minimum of moving_window days for std, forward fill the rest
df[mov_values[-1] + "_std"] = df.groupby(["state"])[val].transform(
lambda x: x.rolling(moving_window, moving_window, center=True).std()
)
# fill final values as std doesn't work with single value
df[mov_values[-1] + "_std"] = df.groupby("state")[
mov_values[-1] + "_std"
].fillna(method="ffill")
# show latest date
print("Latest date in Google indices " + str(df.date.values[-1]))
name_addon = "ma" * moving + (1 - moving) * "standard"
df.to_csv("results/mobility_" + name_addon + ".csv")
return df
def predict_plot(
samples,
df,
moving=True,
grocery=True,
rho=None,
second_phase=False,
third_phase=False,
third_plot_type="combined",
):
"""
Produce posterior predictive plots for all states using the inferred mu_hat. This should run
regardless of the form of the model as it only requires the mu_hat parameter which is
calculated inside stan (the TP model fitted to the Reff).
"""
value_vars = [
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline",
]
value_vars.remove("residential_percent_change_from_baseline")
if not grocery:
value_vars.remove("grocery_and_pharmacy_percent_change_from_baseline")
if moving:
value_vars = [val[:-29] + "_7days" for val in value_vars]
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
if not third_phase:
states.remove("Northern Territory")
states.remove("Australian Capital Territory")
# no R_eff modelled for these states, skip
# counter for brho_v
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
if second_phase:
df_state = df_state.loc[df_state.is_sec_wave == 1]
elif third_phase:
df_state = df_state.loc[df_state.is_third_wave == 1]
# directly plot the fitted TP values
states_to_fitd = {s: i + 1 for i, s in enumerate(rho)}
if not second_phase and not third_phase:
mu_hat = samples[
[
"mu_hat["
+ str(j + 1)
+ ","
+ str(states_to_fitd[states_initials[state]])
+ "]"
for j in range(df_state.shape[0])
]
].values.T
elif second_phase:
mu_hat = samples[
[
"mu_hat_sec[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_sec_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_sec_wave.sum()
)
elif third_phase:
if third_plot_type == "combined":
mu_hat = samples[
[
"mu_hat_third[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
elif third_plot_type == "delta":
mu_hat = samples[
[
"mu_hat_delta_only[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
elif third_plot_type == "omicron":
mu_hat = samples[
[
"mu_hat_omicron_only[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_omicron_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_omicron_wave.sum()
)
df_hat = pd.DataFrame(mu_hat.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
if states_initials[state] not in rho:
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
continue
if not third_phase:
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date, df_state["bottom"], df_state["top"], color="C1", alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date, df_state["lower"], df_state["upper"], color="C1", alpha=0.3
)
elif third_phase:
if third_plot_type in ("combined", "omicron"):
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean_omicron"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["bottom_omicron"],
df_state["top_omicron"],
color="C1",
alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["lower_omicron"],
df_state["upper_omicron"],
color="C1",
alpha=0.3
)
else:
# plot actual R_eff
ax[i // 4, i % 4].plot(
df_state.date, df_state["mean"], label="$R_{eff}$", color="C1"
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["bottom"],
df_state["top"],
color="C1",
alpha=0.3
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_state["lower"],
df_state["upper"],
color="C1",
alpha=0.3
)
ax[i // 4, i % 4].plot(
df_state.date, df_hat.quantile(0.5, axis=0), label="$\hat{\mu}$", color="C0"
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.25, axis=0),
df_hat.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.05, axis=0),
df_hat.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
# grid line at R_eff =1
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
ax[i // 4, i % 4].set_yticks([0, 1, 2], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 1, 2], minor=False)
ax[i // 4, i % 4].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[i // 4, i % 4].set_ylim((0, 2.5))
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
def predict_multiplier_plot(samples, df, param=""):
"""
Produce posterior predictive plots for all states of the micro and macro factors. This should
enable us to look into the overall factor multiplying TP at any given time.
"""
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
# dictionary for mapping between plot type and variable name
factor_dict = {
"micro": "micro_factor",
"macro": "macro_factor",
"susceptibility": "sus_dep_factor"
}
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
df_state = df_state.loc[df_state.is_third_wave == 1]
factor = samples[
[
factor_dict[param] + "[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
df_hat = pd.DataFrame(factor.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
ax[i // 4, i % 4].plot(df_state.date, df_hat.quantile(0.5, axis=0), color="C0")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.25, axis=0),
df_hat.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_hat.quantile(0.05, axis=0),
df_hat.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
ax[i // 4, i % 4].set_yticks([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
def macro_factor_plots(samples, df):
"""
Produce posterior predictive plots for all states of the micro and macro factors. This should
enable us to look into the overall factor multiplying TP at any given time.
"""
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
# dictionary for mapping between plot type and variable name
factor_dict = {
"micro": "micro_factor",
"macro": "macro_factor",
"susceptibility": "sus_dep_factor"
}
pos = 0
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
df_state = df_state.loc[df_state.is_third_wave == 1]
data_factor = samples[
[
"macro_level_data[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
inferred_factor = samples[
[
"macro_level_inferred[" + str(j + 1) + "]"
for j in range(
pos,
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum(),
)
]
].values.T
pos = (
pos
+ df.loc[
df.state == states_initials[state]
].is_third_wave.sum()
)
df_data = pd.DataFrame(data_factor.T)
df_inferred = pd.DataFrame(inferred_factor.T)
# df_hat.to_csv('mu_hat_' + state + '.csv')
ax[i // 4, i % 4].plot(df_state.date, df_data.quantile(0.5, axis=0), color="C0")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_data.quantile(0.25, axis=0),
df_data.quantile(0.75, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_data.quantile(0.05, axis=0),
df_data.quantile(0.95, axis=0),
color="C0",
alpha=0.3,
)
ax[i // 4, i % 4].plot(df_state.date, df_inferred.quantile(0.5, axis=0), color="C1")
ax[i // 4, i % 4].fill_between(
df_state.date,
df_inferred.quantile(0.25, axis=0),
df_inferred.quantile(0.75, axis=0),
color="C1",
alpha=0.3,
)
ax[i // 4, i % 4].fill_between(
df_state.date,
df_inferred.quantile(0.05, axis=0),
df_inferred.quantile(0.95, axis=0),
color="C1",
alpha=0.3,
)
ax[i // 4, i % 4].set_title(state)
ax[i // 4, i % 4].set_yticks([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].set_yticklabels([0, 0.25, 0.5, 0.75, 1, 1.25], minor=False)
ax[i // 4, i % 4].axhline(1, ls="--", c="k", lw=1)
if i // 4 == 1:
ax[i // 4, i % 4].tick_params(axis="x", rotation=90)
plt.legend()
return ax
def plot_adjusted_ve(
data_date,
samples_mov_gamma,
states,
vaccination_by_state,
third_states,
third_date_range,
ve_samples,
ve_idx_ranges,
figs_dir,
strain,
):
"""
A function to process the inferred VE. This will save an updated timeseries which
is the mean posterior estimates.
"""
fig, ax = plt.subplots(figsize=(15, 12), ncols=2, nrows=4, sharey=True, sharex=True)
# temporary state vector
# make a dataframe for the adjusted vacc_ts
df_vacc_ts_adjusted = pd.DataFrame()
# for i, state in enumerate(third_states):
for i, state in enumerate(states):
# for i, state in enumerate(states_tmp):
# grab states vaccination data
vacc_ts_data = vaccination_by_state.loc[state]
# apply different vaccine form depending on if NSW
if state in third_states:
# get the sampled vaccination effect (this will be incomplete as it's only
# over the fitting period)
vacc_tmp = ve_samples.iloc[ve_idx_ranges[state], :]
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index < third_date_range[state][0]]]
* samples_mov_gamma.shape[0],
axis=1,
)
vacc_ts_data_after = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index > third_date_range[state][-1]]]
* samples_mov_gamma.shape[0],
axis=1,
)
# rename columns for easy merging
vacc_ts_data_before.columns = vacc_tmp.columns
vacc_ts_data_after.columns = vacc_tmp.columns
# merge in order
vacc_ts = pd.concat(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after],
axis=0,
ignore_index=True,
)
vacc_ts.set_index(vacc_ts_data.index[: vacc_ts.shape[0]], inplace=True)
else:
# just tile the data
vacc_ts = pd.concat(
[vacc_ts_data] * samples_mov_gamma.shape[0],
axis=1,
)
# reset the index to be the dates for easier information handling
vacc_ts.set_index(vacc_ts_data.index, inplace=True)
# need to name columns samples for consistent indexing
vacc_ts.columns = range(0, samples_mov_gamma.shape[0])
dates = vacc_ts.index
vals = vacc_ts.median(axis=1).values
state_vec = np.repeat([state], vals.shape[0])
df_vacc_ts_adjusted = pd.concat(
[
df_vacc_ts_adjusted,
pd.DataFrame({"state": state_vec, "date": dates, "effect": vals}),
]
)
# create zero array to fill in with the full vaccine effect model
vacc_eff = np.zeros_like(vacc_ts)
# Note that in here we apply the entire sample to the vaccination data
# to create a days by samples array
for ii in range(vacc_eff.shape[0]):
vacc_eff[ii] = vacc_ts.iloc[ii, :]
row = i % 4
col = i // 4
ax[row, col].plot(
dates,
vaccination_by_state.loc[state][: dates.shape[0]].values,
label="data",
color="C1",
)
ax[row, col].plot(dates, np.median(vacc_eff, axis=1), label="fit", color="C0")
ax[row, col].fill_between(
dates,
np.quantile(vacc_eff, 0.25, axis=1),
np.quantile(vacc_eff, 0.75, axis=1),
color="C0",
alpha=0.4,
)
ax[row, col].fill_between(
dates,
np.quantile(vacc_eff, 0.05, axis=1),
np.quantile(vacc_eff, 0.95, axis=1),
color="C0",
alpha=0.4,
)
# plot the start and end of the fitting
if state in third_states:
ax[row, col].axvline(third_date_range[state][0], ls="--", color="red", lw=1)
ax[row, col].axvline(third_date_range[state][-1], ls="--", color="red", lw=1)
ax[row, col].set_title(state)
ax[row, col].tick_params(axis="x", rotation=90)
ax[1, 0].set_ylabel("reduction in TP from vaccination")
df_vacc_ts_adjusted.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/adjusted_vaccine_ts_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
index=False,
)
plt.savefig(
figs_dir
+ data_date.strftime("%Y-%m-%d")
+ "_"
+ strain
+ "_ve_reduction_in_TP.png",
dpi=144,
)
# remove plots from memory
fig.clear()
plt.close(fig)
return None
def read_in_cases(
case_file_date,
apply_delay_at_read=False,
apply_inc_at_read=False,
):
"""
Read in NNDSS data and from data, find rho
"""
df_NNDSS = read_in_NNDSS(
case_file_date,
apply_delay_at_read=apply_delay_at_read,
apply_inc_at_read=apply_inc_at_read,
)
df_state = (
df_NNDSS[["date_inferred", "STATE", "imported", "local"]]
.groupby(["STATE", "date_inferred"])
.sum()
)
df_state["rho"] = [
0 if (i + l == 0) else i / (i + l)
for l, i in zip(df_state.local, df_state.imported)
]
return df_state
def remove_sus_from_Reff(strain, data_date):
"""
This removes the inferred susceptibility depletion from the Reff estimates out of EpyReff.
The inferred Reff = S(t) * Reff_1 where S(t) is the effect of susceptible depletion (i.e. a
factor between 0 and 1) and Reff_1 is the Reff without the effect of a reducing susceptibility
pool.
"""
from params import pop_sizes
data_date = pd.to_datetime(data_date)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ "tau_4.csv",
parse_dates=["INFECTION_DATES"],
)
# read in assumed CA
CA = pd.read_csv(
"results/"
+ "CA_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in cases by infection dates
cases = pd.read_csv(
"results/"
+ "cases_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date_inferred"]
)
# scale the local cases by the assumed CA
cases["local_scaled"] = cases["local"]
cases.loc[cases.date_inferred <= pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.75
cases.loc[cases.date_inferred > pd.to_datetime("2021-12-09"), "local_scaled"] *= 1 / 0.50
# read in the inferred susceptibility depletion factor and convert to a simple array
samples = pd.read_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = samples["phi"][:2000]
sus_dep_factor.to_csv(
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "sampled_susceptible_depletion_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
sus_dep_factor = sus_dep_factor.to_numpy()
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
# init a dataframe to hold the Reff samples without susceptible depletion
df_Reff_adjusted = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
# import tensorflow as tf
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD, Adam
from keras.utils import plot_model
import matplotlib.pyplot as plt
import re
import tensorflowvisu as tfvu
df = pd.read_csv('train.csv')
training_data = df.copy()
labels = training_data['Survived']
test = pd.read_csv('test.csv')
# print(df['PassengerId'])
datavis = tfvu.MnistDataVis
def title_extractor(string):
    match_object = re.search(r', (.+?\.) .', string)
title_string = match_object.group(1)
return title_string
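# Illustrative example: title_extractor("Braund, Mr. Owen Harris") returns "Mr."
# (the regex captures the period-terminated title that follows the comma).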
def cabin_num_extractor(cabin_string):
split = cabin_string.split(' ')
if len(split) > 1:
num_list = []
for cabin in split:
if cabin[1:]:
num_list.append(int(cabin[1:]))
number = int(pd.DataFrame(num_list).median())
else:
        if not split[0][1:]:
            return
        number = int(split[0][1:])
return number
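# Illustrative examples for the extractor above:
#   "C23 C25 C27" -> 25 (median of the cabin numbers)
#   "C85"         -> 85
#   "U"           -> None (no cabin number recorded)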
names = training_data['Name']
titles = pd.DataFrame()
titles['Title'] = training_data['Name'].map(title_extractor)
titles_count = pd.DataFrame()
titles_count['Counts'] = titles['Title'].value_counts()
title_categories = titles_count.index.tolist()
# plt.figure(0)
# titles_count.plot(kind = 'bar')
# plt.axhline(0, color='k')
Title_Dict = {
'Mr.': 'Normal',
'Miss.': 'Normal',
'Mrs.': 'Normal',
'Master.': 'Important',
'Dr.': 'Important',
'Rev.': 'Important',
'Mlle.': 'Normal',
'Major.': 'Important',
'Col.': 'Important',
'Capt.': 'Important',
'Sir.': 'Nobility',
'Jonkheer.': 'Nobility',
'Don.': 'Nobility',
'the Countess.':'Nobility',
'Ms.': 'Normal',
'Mme.': 'Normal',
'Lady.': 'Nobility'
}
titles['Title'] = titles['Title'].map(Title_Dict)
titles['Title'] = pd.get_dummies(titles['Title'])
# print(titles['Title'])
sexes = pd.DataFrame()
sexes['Genders'] = training_data['Sex']
sexes['Genders'] = pd.get_dummies(sexes['Genders'])
male_age = pd.DataFrame()
female_age = pd.DataFrame()
average_age_male = training_data[training_data['Sex'] == 'male']['Age'].mean()
average_age_female = training_data[training_data['Sex'] == 'female']['Age'].mean()
male_age['Age'] = training_data[training_data['Sex'] == 'male']['Age'].fillna(average_age_male)
female_age['Age'] = training_data[training_data['Sex'] == 'female']['Age'].fillna(average_age_female)
ages = male_age.append(female_age, ignore_index=True)
training_data['Cabin'] = training_data['Cabin'].fillna('U')
cabin_letters = | pd.DataFrame() | pandas.DataFrame |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import numpy as np
import datetime
import os, sys
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from lifelines import CoxPHFitter
def generate_survival_data(df_raw, name_PatientID, covariates=None, thresh_censor=None, name_DrugName='DrugName', name_StartDate='StartDate', name_OutcomeDate='OutcomeDate', name_LastVisitDate='LastVisitDate', indicator_miss='Missing'):
'''
Generate survival data from features.
Add column event - whether outcome recorded (event=1) or censored (event=0).
Add column duration - time from StartDate to OutcomeDate (event=1) or LastVisitDate (event=0)
'''
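    # Worked example of the two cases above (illustrative dates):
    #   StartDate 2020-01-01, OutcomeDate 2020-03-01 -> event=1, duration=60 days
    #   StartDate 2020-01-01, OutcomeDate missing, LastVisitDate 2020-02-01
    #       -> event=0, duration=31 days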
df = df_raw[[name_PatientID, name_DrugName]].copy()
df['duration'] = indicator_miss
# Add event.
df.loc[df.index, 'event'] = 0
ids_death = df_raw[name_OutcomeDate]!=indicator_miss
df.loc[ids_death, 'event'] = 1
# death
inds = ids_death
d_days = pd.to_datetime(df_raw[name_OutcomeDate].loc[inds]) - pd.to_datetime(df_raw[name_StartDate].loc[inds])
df.loc[inds, 'duration'] = [d_day.days for d_day in d_days]
    # not death and has a last visit date
inds = ~ids_death & (df_raw[name_LastVisitDate]!=indicator_miss)
d_days = | pd.to_datetime(df_raw[name_LastVisitDate].loc[inds]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from typing import Optional, IO
import pandas as pd
import os
from PySDDP.dessem.script.templates.deflant import DeflAntTemplate
MNE = 'DEFANT'
COMENTARIO = '&'
class DeflAnt(DeflAntTemplate):
"""
    Class holding every element common to any version of the Dessem DeflAnt file.
    Its purpose is to provide duck typing for the Dessem class and to add a level of
    specialisation inside the factory. It also passes on the responsibility for
    implementing the read and write methods.
"""
def __init__(self):
super().__init__()
self.defluencias_uhe_anteriores = dict()
        self.defluencias_uhe_anteriores_df: pd.DataFrame = pd.DataFrame()
self._comentarios_: Optional[list] = None
def ler(self, file_name: str) -> None:
"""
        Method for reading the file of hydroelectric plant outflows prior to the study.
        User Manual III.15 - File of Hydroelectric Plant Outflows Prior to the Study for
        Consideration of Travel Time (DEFLANT.XXX)
        :param file_name: string with the full path to the file
:return:
"""
# dir_base = os.path.split(file_name)[0]
self._comentarios_ = list()
self.defluencias_uhe_anteriores['mne'] = list()
self.defluencias_uhe_anteriores['numuhemon'] = list()
self.defluencias_uhe_anteriores['numuhejus'] = list()
self.defluencias_uhe_anteriores['ent'] = list()
self.defluencias_uhe_anteriores['di'] = list()
self.defluencias_uhe_anteriores['hi'] = list()
self.defluencias_uhe_anteriores['mi'] = list()
self.defluencias_uhe_anteriores['df'] = list()
self.defluencias_uhe_anteriores['hf'] = list()
self.defluencias_uhe_anteriores['mf'] = list()
self.defluencias_uhe_anteriores['defluencia'] = list()
# noinspection PyBroadException
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
while continua:
self.next_line(f)
linha = self.linha.strip()
                    # If the line is a comment, store it; if it does not start with the mnemonic, stop reading the file
if linha[0] == COMENTARIO:
self._comentarios_.append(linha)
continue
else:
if linha[:6] != MNE:
self.dados['defluencias_uhe_anteriores']['valores'] = self.defluencias_uhe_anteriores
self.defluencias_uhe_anteriores_df = pd.DataFrame(self.defluencias_uhe_anteriores)
raise NotImplementedError(f"Mneumônico {linha[:6]} não implementado!")
                    # Ideally we would validate before loading into the structure
self.defluencias_uhe_anteriores['mne'].append(self.linha[:6])
self.defluencias_uhe_anteriores['numuhemon'].append(self.linha[7:12])
self.defluencias_uhe_anteriores['numuhejus'].append(self.linha[13:17])
self.defluencias_uhe_anteriores['ent'].append(self.linha[18:20])
self.defluencias_uhe_anteriores['di'].append(self.linha[21:26])
self.defluencias_uhe_anteriores['hi'].append(self.linha[27:29])
self.defluencias_uhe_anteriores['mi'].append(self.linha[30:31])
self.defluencias_uhe_anteriores['df'].append(self.linha[32:34])
self.defluencias_uhe_anteriores['hf'].append(self.linha[35:37])
self.defluencias_uhe_anteriores['mf'].append(self.linha[38:39])
self.defluencias_uhe_anteriores['defluencia'].append(self.linha[40:54])
except Exception as err:
if isinstance(err, StopIteration):
                # Check whether the end of the block was reached
if self.linha[0].upper() == COMENTARIO or self.linha[:6].upper() == MNE:
self.dados['defluencias_uhe_anteriores']['valores'] = self.defluencias_uhe_anteriores
self.defluencias_uhe_anteriores_df = | pd.DataFrame(self.defluencias_uhe_anteriores) | pandas.DataFrame |
from urllib import response
from pyparsing import col
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
from os.path import join
from matplotlib import pyplot as plt
from typing import List
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# Load data and perform basic filtering
df = | pd.read_csv(filename) | pandas.read_csv |
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
from pymongo import MongoClient
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
class Terminus(BaseHTTPRequestHandler):
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
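    # "SHOW TAG VALUES" returns key/value rows, so the node names sit in the
    # "value" column extracted above; the queries below reuse the same pattern.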
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = pd.DataFrame(podNames_temp)
if dfpodNames.empty:
return dfpodNames
else:
allpodNames = dfpodNames[:]["value"]
return allpodNames
def getCPUUtilizationNode(self,client, node):
queryResult = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/node_utilization'])
return dfcpuUtilization
def getCPUUtilizationPod(self,client, node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(self,client,node,ns_name, pod_name):
cpuUtilization = self.getCPUUtilizationNode(client,node)
podCpuUtilization = self.getCPUUtilizationPod(client,node,ns_name, pod_name)
containercpuUtilization = self.getCPUUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(self,client,node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(self,client,node,ns_name, pod_name):
memoryUtilization = self.getMemoryUtilizationNode(client,node)
podMemoryUtilization = self.getMemoryUtilizationPod(client,node,ns_name, pod_name)
containerMemoryUtilization = self.getMemoryUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(self,client,node,ns_name, pod_name):
podNetworTxRate = self.getNetworkTxRatePod(client,node,ns_name, pod_name)
podNetworTx = self.getNetworkTxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkTxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkTxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(self,client,node,ns_name, pod_name):
podNetworRxRate = self.getNetworkRxRatePod(client,node,ns_name, pod_name)
podNetworRx = self.getNetworkRxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkRxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkRxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getRelevantNodeName(self,client,ns_name):
allNodeNames = self.getAllNodeNames(client)
#nsNames = getNamespaceNames(allNodeNames[0])
relevantNodes = []
for node in allNodeNames:
            allPodNamesNode = self.getAllPodNames(client, node, ns_name)
if(not allPodNamesNode.empty):
relevantNodes.append(node)
return relevantNodes
def getNodeResourceUtilizationDf(self,client, nodeName):
Result_node_CPU = client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_MEM = client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_CPU_Cores = client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+
"' AND type = 'node' GROUP BY time(1m)")
Result_node_mem_node = client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+
nodeName+"' AND type = 'node' GROUP BY time(1m)")
cpu_points = pd.DataFrame(Result_node_CPU.get_points())
cpu_points['time'] = pd.to_datetime(cpu_points['time'])
cpu_points = cpu_points.set_index('time')
cpu_points.columns = ['node_cpu_util']
mem_points = pd.DataFrame(Result_node_MEM.get_points())
mem_points['time'] = pd.to_datetime(mem_points['time'])
mem_points = mem_points.set_index('time')
mem_points.columns = ['node_mem_util']
cores_points = pd.DataFrame(Result_node_CPU_Cores.get_points())
cores_points['time'] = pd.to_datetime(cores_points['time'])
cores_points = cores_points.set_index('time')
cores_points.columns = ['node_cores']
mem_node_points = pd.DataFrame(Result_node_mem_node.get_points())
mem_node_points['time'] = pd.to_datetime(mem_node_points['time'])
mem_node_points = mem_node_points.set_index('time')
mem_node_points.columns = ['node_mem']
df_node =pd.concat([cpu_points, mem_points,cores_points,mem_node_points], axis=1)
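        # df_node aligns four per-minute series on the InfluxDB timestamps:
        # node_cpu_util, node_mem_util, node_cores and node_mem.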
return df_node
def getPodResourceUtilizationDf(self,client, node, ns_name, pod_name):
Result_Pod_CPU_usage = client.query('SELECT value FROM "cpu/usage_rate" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_MEM_usage = client.query('SELECT value from \"memory/usage\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_CPU_limit = client.query('SELECT mean(\"value\") FROM "cpu/limit" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_limit = client.query('SELECT mean(\"value\") from \"memory/limit\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
Result_Pod_CPU_requests = client.query('SELECT mean(\"value\") FROM "cpu/request" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_requests = client.query('SELECT mean(\"value\") from \"memory/request\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
cpu_points_usage = pd.DataFrame(Result_Pod_CPU_usage.get_points())
cpu_points_usage['time'] = pd.to_datetime(cpu_points_usage['time'])
cpu_points_usage = cpu_points_usage.set_index('time')
cpu_points_usage.columns = ['pod_cpu_usage']
mem_points_usage = pd.DataFrame(Result_Pod_MEM_usage.get_points())
mem_points_usage['time'] = pd.to_datetime(mem_points_usage['time'])
mem_points_usage = mem_points_usage.set_index('time')
mem_points_usage.columns = ['pod_mem_usage']
cpu_points_limits = pd.DataFrame(Result_Pod_CPU_limit.get_points())
cpu_points_limits['time'] = pd.to_datetime(cpu_points_limits['time'])
cpu_points_limits = cpu_points_limits.set_index('time')
cpu_points_limits.columns = ['pod_cpu_limit']
mem_points_limits = pd.DataFrame(Result_Pod_MEM_limit.get_points())
mem_points_limits['time'] = pd.to_datetime(mem_points_limits['time'])
mem_points_limits = mem_points_limits.set_index('time')
mem_points_limits.columns = ['pod_mem_limit']
cpu_points_request = pd.DataFrame(Result_Pod_CPU_requests.get_points())
cpu_points_request['time'] = pd.to_datetime(cpu_points_request['time'])
cpu_points_request = cpu_points_request.set_index('time')
cpu_points_request.columns = ['pod_cpu_request']
mem_points_request = pd.DataFrame(Result_Pod_MEM_requests.get_points())
mem_points_request['time'] = pd.to_datetime(mem_points_request['time'])
mem_points_request = mem_points_request.set_index('time')
mem_points_request.columns = ['pod_mem_request']
df_pod =pd.concat([cpu_points_usage, mem_points_usage,cpu_points_limits,mem_points_limits,cpu_points_request,mem_points_request ], axis=1)
return df_pod
def getRequestsDf(self,clientK6):
queryResult = clientK6.query('SELECT sum("value") FROM "vus" group by time(1m);')
vus = pd.DataFrame(queryResult['vus'])
vus.columns = ['vus','time']
vus = vus.set_index('time')
queryResultReqs = clientK6.query('SELECT sum("value") FROM "http_reqs" group by time(1m);')
reqs = pd.DataFrame(queryResultReqs['http_reqs'])
reqs.columns = ['requests','time']
reqs = reqs.set_index('time')
queryResultReqsDuration95 = clientK6.query('SELECT percentile("value", 95) FROM "http_req_duration" group by time(1m) ;')
reqs_duration95 = pd.DataFrame(queryResultReqsDuration95['http_req_duration'])
reqs_duration95.columns = [ 'requests_duration_percentile_95','time']
reqs_duration95 = reqs_duration95.set_index('time')
queryResultReqsDuration90 = clientK6.query('SELECT percentile("value", 90) FROM "http_req_duration" group by time(1m) ;')
reqs_duration90 = pd.DataFrame(queryResultReqsDuration90['http_req_duration'])
reqs_duration90.columns = ['requests_duration_percentile_90','time']
reqs_duration90 = reqs_duration90.set_index('time')
queryResultMaxDuration = clientK6.query('SELECT max("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_max = pd.DataFrame(queryResultMaxDuration['http_req_duration'])
reqs_duration_max.columns = ['requests_duration_max','time']
reqs_duration_max = reqs_duration_max.set_index('time')
queryResultMinDuration = clientK6.query('SELECT min("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_min = pd.DataFrame(queryResultMinDuration['http_req_duration'])
reqs_duration_min.columns = ['requests_duration_min','time']
reqs_duration_min = reqs_duration_min.set_index('time')
queryResultMeanDuration = clientK6.query('SELECT mean("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_mean = pd.DataFrame(queryResultMeanDuration['http_req_duration'])
reqs_duration_mean.columns = ['requests_duration_mean','time']
reqs_duration_mean = reqs_duration_mean.set_index('time')
queryResultMedianDuration = clientK6.query('SELECT median("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_median = pd.DataFrame(queryResultMedianDuration['http_req_duration'])
reqs_duration_median.columns = ['requests_duration_median','time']
reqs_duration_median = reqs_duration_median.set_index('time')
finalDF = pd.merge(vus, reqs, left_index=True, right_index=True)
finalDF = pd.merge(finalDF, reqs_duration95, left_index=True, right_index=True)
finalDF = pd.merge(finalDF, reqs_duration90, left_index=True, right_index=True)
finalDF = | pd.merge(finalDF,reqs_duration_max, left_index=True, right_index=True) | pandas.merge |
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
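        # merge_asof defaults to direction="backward": each left "a" takes the
        # last right "a" <= it, hence right_val [1, 3, 7] for a = [1, 5, 10].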
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
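        # With direction="forward" each left "a" takes the first right "a" >= it:
        # 1 -> 1, 5 -> 6, and 10 has no later match, hence NaN.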
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
        # left-only index uses right's index, oddly
expected.index = result.index
        # time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = pd.merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = pd.merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = pd.merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_dtype):
# see gh-13936
dtype = np.dtype(any_real_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = pd.merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 15 15:08:00 2018
@author: Mangifera
"""
from datetime import datetime, timezone
from dateutil import tz
import pandas as pd
def formatTime(timestamp, t_format, city_timezone):
utc = datetime.fromtimestamp(timestamp, timezone.utc)
city_timezone = tz.gettz(city_timezone)
return utc.astimezone(city_timezone).strftime(t_format)
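# Example: formatTime(1609459200, "%Y-%m-%d %H:%M", "US/Eastern") returns
# "2020-12-31 19:00", since 1609459200 is 2021-01-01 00:00 UTC and US/Eastern
# is UTC-5 on that date.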
def time_columns(df, time__zone):
df["Year"] = df["UNIX_UTC"].apply(formatTime, t_format = "%Y", city_timezone=time__zone)
df["Year"] = pd.to_numeric(df["Year"], errors='coerce')
df["Month"] = df["UNIX_UTC"].apply(formatTime, t_format = "%m", city_timezone=time__zone)
df["Month"] = pd.to_numeric(df["Month"], errors='coerce')
df["Day"] = df["UNIX_UTC"].apply(formatTime, t_format = "%d", city_timezone=time__zone)
df["Day"] = | pd.to_numeric(df["Day"], errors='coerce') | pandas.to_numeric |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
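# date_range includes both endpoints, so dr has 6 daily points (argmax == 5)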
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
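# NaT never compares equal (or ordered) to anything: '==', '<' and '>'
# yield False at NaT positions, while '!=' yields True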
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
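# with box=False the raw int64 representation (iNaT) is returned
# instead of a DatetimeIndex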
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
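# the datetime64 values are reinterpreted as timedelta64 offsets from
# the 1970-01-01 epoch, hence the '16815 days' / '16820 days' values below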
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/subtracting offsets, as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/subtracting offsets, as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
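# constructing with the existing dtype should reuse the underlying data,
# so writes through the new frame are visible in the original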
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should be
# raised first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
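# the missing entry stays a float NaN even though the column is object dtype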
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2-D matrix with shape (2, 3) to pass in; `empty` creates sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
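# every entry is masked (NaN) and NaN != NaN, so the comparison is all False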
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
# collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)
tm.assert_frame_equal(df, expected)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype='U5')
tm.assert_frame_equal(df, expected)
def test_constructor_single_value(self):
# expecting single value upcasting here
df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df,
DataFrame(np.zeros(df.shape).astype('float64'),
df.index, df.columns))
df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])
tm.assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'),
df.index, df.columns))
df = DataFrame('a', index=[1, 2], columns=['a', 'c'])
tm.assert_frame_equal(df, DataFrame(np.array([['a', 'a'], ['a', 'a']],
dtype=object),
index=[1, 2], columns=['a', 'c']))
pytest.raises(ValueError, DataFrame, 'a', [1, 2])
pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c'])
msg = 'incompatible data and dtype'
with pytest.raises(TypeError, match=msg):
DataFrame('a', [1, 2], ['a', 'c'], float)
def test_constructor_with_datetimes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# single item
df = DataFrame({'A': 1, 'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime(2001, 1, 2, 0, 0)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, datetime64name: 2, objectname: 2})
result.sort_index()
expected.sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim==0 (e.g. we are passing a ndim 0
# ndarray with a dtype specified)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array(1., dtype=floatname),
intname: np.array(1, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
expected = {objectname: 1}
if intname == 'int64':
expected['int64'] = 2
else:
expected['int64'] = 1
expected[intname] = 1
if floatname == 'float64':
expected['float64'] = 2
else:
expected['float64'] = 1
expected[floatname] = 1
result = result.sort_index()
expected = Series(expected).sort_index()
tm.assert_series_equal(result, expected)
# check with ndarray construction ndim>0
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
floatname: np.array([1.] * 10, dtype=floatname),
intname: np.array([1] * 10, dtype=intname)},
index=np.arange(10))
result = df.get_dtype_counts()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# GH 2809
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
datetime_s = Series(datetimes)
assert datetime_s.dtype == 'M8[ns]'
df = DataFrame({'datetime_s': datetime_s})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 2810
ind = date_range(start="2000-01-01", freq="D", periods=10)
datetimes = [ts.to_pydatetime() for ts in ind]
dates = [ts.date() for ts in ind]
df = DataFrame({'datetimes': datetimes, 'dates': dates})
result = df.get_dtype_counts()
expected = Series({datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
# GH 7594
# don't coerce tz-aware
import pytz
tz = pytz.timezone('US/Eastern')
dt = tz.localize(datetime(2012, 1, 1))
df = DataFrame({'End Date': dt}, index=[0])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
df = DataFrame([{'End Date': dt}])
assert df.iat[0, 0] == dt
tm.assert_series_equal(df.dtypes, Series(
{'End Date': 'datetime64[ns, US/Eastern]'}))
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
df = DataFrame({'value': dr})
assert df.iat[0, 0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
df = DataFrame({'value': dr})
assert str(df.iat[0, 0].tz) == 'US/Eastern'
# GH 7822
# preserve an index with a tz on dict construction
i = date_range('1/1/2011', periods=5, freq='10s', tz='US/Eastern')
expected = DataFrame(
{'a': i.to_series(keep_tz=True).reset_index(drop=True)})
df = DataFrame()
df['a'] = i
tm.assert_frame_equal(df, expected)
df = DataFrame({'a': i})
tm.assert_frame_equal(df, expected)
# multiples
i_no_tz = date_range('1/1/2011', periods=5, freq='10s')
df = DataFrame({'a': i, 'b': i_no_tz})
expected = DataFrame({'a': i.to_series(keep_tz=True)
.reset_index(drop=True), 'b': i_no_tz})
tm.assert_frame_equal(df, expected)
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = DataFrame(arr).get_dtype_counts()
expected = Series({'datetime64[ns]': 1})
tm.assert_series_equal(result, expected)
def test_constructor_for_list_with_dtypes(self):
# TODO(wesm): unused
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
# test list of lists/ndarrays
df = DataFrame([np.arange(5) for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int64': 5})
df = DataFrame([np.array(np.arange(5), dtype='int32')
for x in range(5)])
result = df.get_dtype_counts()
expected = Series({'int32': 5})
# overflow issue? (we always expect int64 upcasting here)
df = DataFrame({'a': [2 ** 31, 2 ** 31 + 1]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
# GH #2751 (construction with no index specified), make sure we cast to
# platform values
df = DataFrame([1, 2])
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame([1., 2.])
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1, 2]})
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': [1., 2.]})
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'int64': 1})
tm.assert_series_equal(result, expected)
df = DataFrame({'a': 1.}, index=lrange(3))
result = df.get_dtype_counts()
expected = Series({'float64': 1})
tm.assert_series_equal(result, expected)
# with object list
df = DataFrame({'a': [1, 2, 4, 7], 'b': [1.2, 2.3, 5.1, 6.3],
'c': list('abcd'),
'd': [datetime(2000, 1, 1) for i in range(4)],
'e': [1., 2, 4., 7]})
result = df.get_dtype_counts()
expected = Series(
{'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_constructor_frame_copy(self):
cop = DataFrame(self.frame, copy=True)
cop['A'] = 5
assert (cop['A'] == 5).all()
assert not (self.frame['A'] == 5).all()
def test_constructor_ndarray_copy(self):
df = DataFrame(self.frame.values)
self.frame.values[5] = 5
assert (df.values[5] == 5).all()
df = DataFrame(self.frame.values, copy=True)
self.frame.values[6] = 6
assert not (df.values[6] == 6).all()
def test_constructor_series_copy(self):
series = self.frame._series
df = DataFrame({'A': series['A']})
df['A'][:] = 5
assert not (series['A'] == 5).all()
def test_constructor_with_nas(self):
# GH 5016
# na's in indices
def check(df):
for i in range(len(df.columns)):
df.iloc[:, i]
indexer = np.arange(len(df.columns))[isna(df.columns)]
# No NaN found -> error
if len(indexer) == 0:
def f():
df.loc[:, np.nan]
pytest.raises(TypeError, f)
# single nan should result in Series
elif len(indexer) == 1:
tm.assert_series_equal(df.iloc[:, indexer[0]],
df.loc[:, np.nan])
# multiple nans should result in DataFrame
else:
tm.assert_frame_equal(df.iloc[:, indexer],
df.loc[:, np.nan])
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
check(df)
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1.1, 2.2, np.nan])
check(df)
# GH 21428 (non-unique columns)
df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]],
columns=[np.nan, 1, 2, 2])
check(df)
def test_constructor_lists_to_object_dtype(self):
# from #1074
d = DataFrame({'a': [np.nan, False]})
assert d['a'].dtype == np.object_
assert not d['a'][1]
def test_constructor_categorical(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
pytest.raises(ValueError,
lambda: DataFrame([Categorical(list('abc')),
Categorical(list('abdefg'))]))
# ndim > 1
pytest.raises(NotImplementedError,
lambda: Categorical(np.array([list('abcd')])))
def test_constructor_categorical_series(self):
items = [1, 2, 3, 1]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
items = ["a", "b", "c", "a"]
exp = Series(items).astype('category')
res = Series(items, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
arr[:] = [(1, 2., 'Hello'), (2, 3., "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = pd.Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index='f1')
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert 'index' not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3),
(1, 2, None, 3),
(None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])
assert np.isnan(df['c'][0])
def test_from_records_iterator(self):
arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6),
(7., 7., 8, 8)],
dtype=[('x', np.float64), ('u', np.float32),
('y', np.int64), ('z', np.int32)])
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),
'u': np.array([1.0, 3.0], dtype=np.float32),
'y': np.array([2, 4], dtype=np.int64),
'z': np.array([2, 4], dtype=np.int32)})
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]
df = DataFrame.from_records(iter(arr), columns=['x', 'y'],
nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=['x', 'y']),
check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield (i, letters[i % len(letters)], i / length)
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in tuple_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
yield [i, letters[i % len(letters)], i / length]
columns_names = ['Integer', 'String', 'Float']
columns = [[i[j] for i in list_generator(
10)] for j in range(len(columns_names))]
data = {'Integer': columns[0],
'String': columns[1], 'Float': columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3),
(1, 2, 3),
(2, 5, 3)]
columns = ['a', 'b', 'c']
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index='a') # noqa
assert columns == original_columns
def test_from_records_decimal(self):
from decimal import Decimal
tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]
df = DataFrame.from_records(tuples, columns=['a'])
assert df['a'].dtype == object
df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)
assert df['a'].dtype == np.float64
assert np.isnan(df['a'].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
expected = DataFrame([(1, 2, 3), (4, 5, 6)],
columns=['a', 'b', 'a'])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {'order_id': order_id, 'quantity': np.random.randint(1, 10),
'price': np.random.randint(1, 10)}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({'order_id': 10, 'quantity': 5})
result = DataFrame.from_records(documents, index='order_id')
assert result.index.name == 'order_id'
# MultiIndex
result = DataFrame.from_records(documents,
index=['order_id', 'quantity'])
assert result.index.names == ('order_id', 'quantity')
def test_from_records_misc_brokenness(self):
# #2179
data = {1: ['foo'], 2: ['bar']}
result = DataFrame.from_records(data, columns=['a', 'b'])
exp = DataFrame(data, columns=['a', 'b'])
tm.assert_frame_equal(result, exp)
# overlap in index/index_names
data = {'a': [1, 2, 3], 'b': [4, 5, 6]}
result = DataFrame.from_records(data, index=['a', 'b', 'c'])
exp = DataFrame(data, index=['a', 'b', 'c'])
tm.assert_frame_equal(result, exp)
# GH 2623
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts()
expected = Series({'datetime64[ns]': 1, 'object': 1})
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 1])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
results = df2_obj.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 1, 'int64': 1})
tm.assert_series_equal(results, expected)
def test_from_records_empty(self):
# 3562
result = DataFrame.from_records([], columns=['a', 'b', 'c'])
expected = DataFrame(columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
result = DataFrame.from_records([], columns=['a', 'b', 'b'])
expected = DataFrame(columns=['a', 'b', 'b'])
tm.assert_frame_equal(result, expected)
def test_from_records_empty_with_nonempty_fields_gh3682(self):
a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(a, index='id')
tm.assert_index_equal(df.index, Index([1], name='id'))
assert df.index.name == 'id'
tm.assert_index_equal(df.columns, Index(['value']))
b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])
df = DataFrame.from_records(b, index='id')
tm.assert_index_equal(df.index, Index([], name='id'))
assert df.index.name == 'id'
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH 6140
expected = DataFrame({'EXPIRY': [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[ns]')]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [('EXPIRY', '<M8[m]')]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# it is actually tricky to create the record-like arrays here and
# keep the dtypes intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1])
for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in compat.iteritems(blocks):
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = (DataFrame.from_records(tuples, columns=columns)
.reindex(columns=df.columns))
# created recarray and with to_records recarray (have dtype info)
result2 = (DataFrame.from_records(recarray, columns=columns)
.reindex(columns=df.columns))
result3 = (DataFrame.from_records(recarray2, columns=columns)
.reindex(columns=df.columns))
# list of tuples (no dtype info)
result4 = (DataFrame.from_records(lists, columns=columns)
.reindex(columns=df.columns))
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, pd.Index(lrange(8)))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index('C'), columns.index('E1')]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result['C'], df['C'])
tm.assert_series_equal(result['E1'], df['E1'].astype('float64'))
# empty case
result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])
assert len(result) == 0
tm.assert_index_equal(result.columns,
pd.Index(['foo', 'bar', 'baz']))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame({'A': np.array(np.random.randn(6), dtype=np.float64),
'A1': np.array(np.random.randn(6), dtype=np.float64),
'B': np.array(np.arange(6), dtype=np.int64),
'C': ['foo'] * 6,
'D': np.array([True, False] * 3, dtype=bool),
'E': np.array(np.random.randn(6), dtype=np.float32),
'E1': np.array(np.random.randn(6), dtype=np.float32),
'F': np.array(np.arange(6), dtype=np.int32)})
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in compat.iteritems(blocks):
columns.extend(b.columns)
asdict = {x: y for x, y in compat.iteritems(df)}
asdict2 = {x: y.values for x, y in compat.iteritems(df)}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(
asdict).reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict, columns=columns)
.reindex(columns=df.columns))
results.append(DataFrame.from_records(asdict2, columns=columns)
.reindex(columns=df.columns))
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
# should pass
df1 = DataFrame.from_records(df, index=['C'])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index='C')
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
pytest.raises(ValueError, DataFrame.from_records, df, index=[2])
pytest.raises(KeyError, DataFrame.from_records, df, index=2)
def test_from_records_non_tuple(self):
class Record(object):
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = lmap(tuple, recs)
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# #2633
result = DataFrame.from_records([], index='foo',
columns=['foo', 'bar'])
expected = Index(['bar'])
assert len(result) == 0
assert result.index.name == 'foo'
tm.assert_index_equal(result.columns, expected)
def test_to_frame_with_falsey_names(self):
# GH 16114
result = Series(name=0).to_frame().dtypes
expected = Series({0: np.float64})
tm.assert_series_equal(result, expected)
result = DataFrame(Series(name=0)).dtypes
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = DataFrame({'A': [0, 1, 2, 3, 4]}, dtype=dtype or 'int64')
result = DataFrame({'A': range(5)}, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_frame_from_list_subclass(self):
# GH21226
class List(list):
pass
expected = DataFrame([[1, 2, 3], [4, 5, 6]])
result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))
tm.assert_frame_equal(result, expected)
class TestDataFrameConstructorWithDatetimeTZ(TestData):
def test_from_dict(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
dr = date_range('20130110', periods=3)
# construction
df = DataFrame({'A': idx, 'B': dr})
assert df['A'].dtype == 'datetime64[ns, US/Eastern]'
assert df['A'].name == 'A'
tm.assert_series_equal(df['A'], Series(idx, name='A'))
tm.assert_series_equal(df['B'], Series(dr, name='B'))
def test_from_index(self):
# from index
idx2 = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2['foo'], Series(idx2, name='foo'))
idx2 = date_range('20130101', periods=3, tz='US/Eastern')
df2 = DataFrame(idx2)
tm.assert_series_equal(df2[0], Series(idx2, name=0))
df2 = DataFrame(Series(idx2))
tm.assert_series_equal(df2[0], Series(idx2, name=0))
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
assert d['B'].isna().all()
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
assert result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_timeseries_column(self):
# GH19157
dr = date_range(start='20130101T10:00:00', periods=3, freq='T',
tz='US/Eastern')
result = DataFrame(dr, columns=['timestamps'])
expected = DataFrame({'timestamps': [
Timestamp('20130101T10:00:00', tz='US/Eastern'),
Timestamp('20130101T10:01:00', tz='US/Eastern'),
Timestamp('20130101T10:02:00', tz='US/Eastern')]})
tm.assert_frame_equal(result, expected)
from collections import defaultdict
import numpy as np
import pandas as pd
from utils import plot_utils
def mean(*lst):
    return sum(lst) / len(lst)
# if __name__ == '__main__':
#
# ls_dct=[{'Stars':2, 'Cast':0.11},
# {'Stars':3, 'Cast':0.01},
# {'Stars':5, 'Cast':0.01}
# ]
#
# # result =map(mean, **ls_dct)
# # print(list(result))
# dct_sum = defaultdict(float)
#
# import numpy as np
# test = np.array(ls_dct).mean()
#
# for dict in ls_dct:
# for key, val in dict.items():
# dct_sum[key] += val
# np_mean_vals = np.array(list(dct_sum.values()))/len(ls_dct)
# dct_mean = list(zip(dct_sum.keys(), np_mean_vals))
# print(dct_mean)
#
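# A working sketch of the per-key averaging idea in the commented-out block
# above. ``mean_by_key`` is a hypothetical helper added for illustration; it
# is not referenced anywhere else in this module.
def mean_by_key(ls_dct):
    """Average the values of each key across a list of dicts."""
    dct_sum = defaultdict(float)
    dct_cnt = defaultdict(int)
    for dct in ls_dct:
        for key, val in dct.items():
            dct_sum[key] += val
            dct_cnt[key] += 1
    return {key: dct_sum[key] / dct_cnt[key] for key in dct_sum}
# Example: mean_by_key([{'Stars': 2, 'Cast': 0.11}, {'Stars': 3, 'Cast': 0.01}])
# -> {'Stars': 2.5, 'Cast': ~0.06}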
import random
def create_synthetic_data():
no_samples =50
genres = ['Crime', 'Mystery', 'Thriller', 'Action', 'Drama', 'Romance','Comedy', 'War','Adventure', 'Family']
year = ['1980', '1990', '2000', '2010', '2020']
stars = ['<NAME>', '<NAME>', '<NAME>','<NAME>', '<NAME>', '<NAME>']
rating = ['7', '8', '9', '10']
dct_base_data ={'genres': genres, 'year': year, 'stars': stars, 'rating': rating}
ls_movies = []
#genre-users
ls_attributes = ['genres', 'year', 'stars', 'rating']
n_users = 300
n_movies = len(ls_attributes) * no_samples
np_user_item = np.zeros((n_users,n_movies))
for attribute in ls_attributes:
for i in range(no_samples):
movie = {}
movie[attribute] = [dct_base_data[attribute][0]]
for other_attribute in ls_attributes:
if(other_attribute == attribute):
continue
if(other_attribute == 'rating' or other_attribute == 'year'):
movie[other_attribute] = random.choices(dct_base_data[other_attribute], k=1)
else:
movie[other_attribute] = random.choices(dct_base_data[other_attribute], k=2)
ls_movies.append(movie)
df_synthetic_data = pd.DataFrame(columns=['genres', 'year', 'stars', 'rating'], data=ls_movies)
import collections
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import requests
from datetime import datetime, timedelta
from typing import Tuple, Dict, Union
import pytz
from pandas.core.dtypes.common import is_string_dtype, is_numeric_dtype
from hydrodataset.data.data_base import DataSourceBase
from hydrodataset.data.stat import cal_fdc
from hydrodataset.utils import hydro_utils
from hydrodataset.utils.hydro_utils import (
is_any_elem_in_a_lst,
unzip_nested_zip,
hydro_logger,
download_one_zip,
download_small_file,
)
class Gages(DataSourceBase):
def __init__(self, data_path, download=False):
super().__init__(data_path)
self.data_source_description = self.set_data_source_describe()
if download:
self.download_data_source()
self.gages_sites = self.read_site_info()
def get_name(self):
return "GAGES"
def get_constant_cols(self) -> np.array:
"""all readable attrs in GAGES-II"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
var_desc_file = os.path.join(dir_gage_attr, "variable_descriptions.txt")
var_desc = pd.read_csv(var_desc_file)
return var_desc["VARIABLE_NAME"].values
def get_relevant_cols(self):
return np.array(["dayl", "prcp", "srad", "swe", "tmax", "tmin", "vp"])
def get_target_cols(self):
return np.array(["usgsFlow"])
def get_other_cols(self) -> dict:
return {
"FDC": {"time_range": ["1980-01-01", "2000-01-01"], "quantile_num": 100}
}
def set_data_source_describe(self):
gages_db = self.data_source_dir
# region shapefiles
gage_region_dir = os.path.join(
gages_db,
"boundaries_shapefiles_by_aggeco",
"boundaries-shapefiles-by-aggeco",
)
gages_regions = [
"bas_ref_all",
"bas_nonref_CntlPlains",
"bas_nonref_EastHghlnds",
"bas_nonref_MxWdShld",
"bas_nonref_NorthEast",
"bas_nonref_SECstPlain",
"bas_nonref_SEPlains",
"bas_nonref_WestMnts",
"bas_nonref_WestPlains",
"bas_nonref_WestXeric",
]
# point shapefile
gagesii_points_file = os.path.join(
gages_db, "gagesII_9322_point_shapefile", "gagesII_9322_sept30_2011.shp"
)
# config of flow data
flow_dir = os.path.join(gages_db, "gages_streamflow", "gages_streamflow")
# forcing
forcing_dir = os.path.join(gages_db, "basin_mean_forcing", "basin_mean_forcing")
forcing_types = ["daymet"]
# attr
attr_dir = os.path.join(
gages_db, "basinchar_and_report_sept_2011", "spreadsheets-in-csv-format"
)
gauge_id_file = os.path.join(attr_dir, "conterm_basinid.txt")
download_url_lst = [
"https://water.usgs.gov/GIS/dsdl/basinchar_and_report_sept_2011.zip",
"https://water.usgs.gov/GIS/dsdl/gagesII_9322_point_shapefile.zip",
"https://water.usgs.gov/GIS/dsdl/boundaries_shapefiles_by_aggeco.zip",
"https://www.sciencebase.gov/catalog/file/get/59692a64e4b0d1f9f05fbd39",
]
usgs_streamflow_url = "https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no={}&referred_module=sw&period=&begin_date={}-{}-{}&end_date={}-{}-{}"
# GAGES-II time series data_source dir
gagests_dir = os.path.join(gages_db, "59692a64e4b0d1f9f05f")
population_file = os.path.join(
gagests_dir,
"Dataset8_Population-Housing",
"Dataset8_Population-Housing",
"PopulationHousing.txt",
)
wateruse_file = os.path.join(
gagests_dir,
"Dataset10_WaterUse",
"Dataset10_WaterUse",
"WaterUse_1985-2010.txt",
)
return collections.OrderedDict(
GAGES_DIR=gages_db,
GAGES_FLOW_DIR=flow_dir,
GAGES_FORCING_DIR=forcing_dir,
GAGES_FORCING_TYPE=forcing_types,
GAGES_ATTR_DIR=attr_dir,
GAGES_GAUGE_FILE=gauge_id_file,
GAGES_DOWNLOAD_URL_LST=download_url_lst,
GAGES_REGIONS_SHP_DIR=gage_region_dir,
GAGES_REGION_LIST=gages_regions,
GAGES_POINT_SHP_FILE=gagesii_points_file,
GAGES_POPULATION_FILE=population_file,
GAGES_WATERUSE_FILE=wateruse_file,
USGS_FLOW_URL=usgs_streamflow_url,
)
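def _flow_url_sketch(self, usgs_id="01013500"):
    # Illustration only (hypothetical helper, not used by this class): how the
    # USGS_FLOW_URL template above is expected to be filled -- the site id
    # first, then the begin year/month/day, then the end year/month/day.
    url_tmpl = self.data_source_description["USGS_FLOW_URL"]
    return url_tmpl.format(usgs_id, 1980, 1, 1, 2020, 12, 31)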
def read_other_cols(self, object_ids=None, other_cols=None, **kwargs) -> dict:
# TODO: not finished
out_dict = {}
for key, value in other_cols.items():
if key == "FDC":
assert "time_range" in value.keys()
if "quantile_num" in value.keys():
quantile_num = value["quantile_num"]
out = cal_fdc(
self.read_target_cols(
object_ids, value["time_range"], "usgsFlow"
),
quantile_num=quantile_num,
)
else:
out = cal_fdc(
self.read_target_cols(
object_ids, value["time_range"], "usgsFlow"
)
)
else:
raise NotImplementedError("No this item yet!!")
out_dict[key] = out
return out_dict
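def _fdc_sketch(self, flow_3d, quantile_num=100):
    """Conceptual sketch of a flow duration curve (hypothetical helper).

    The real computation used above is hydrodataset.data.stat.cal_fdc and may
    differ in detail; this only illustrates the idea: for every basin, take
    ``quantile_num`` exceedance quantiles of its flow series. ``flow_3d`` is
    assumed to have the shape returned by read_target_cols (basins x time x 1).
    """
    flow_2d = flow_3d[:, :, 0]
    probs = np.linspace(0, 1, quantile_num, endpoint=False)
    return np.stack([np.nanquantile(row, 1 - probs) for row in flow_2d])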
def read_attr_all(self, gages_ids: Union[list, np.ndarray]):
"""
read all attr data for some sites in GAGES-II
TODO: unlike its CAMELS counterpart, this read_attr_all takes a "gages_ids" parameter
Parameters
----------
gages_ids : Union[list, np.ndarray]
gages sites' ids
Returns
-------
ndarray
all attr data for gages_ids
"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
f_dict = dict() # factorize dict
# each key-value pair for atts in a file (list)
var_dict = dict()
# all attrs
var_lst = list()
out_lst = list()
# read all attrs
var_des = pd.read_csv(
os.path.join(dir_gage_attr, "variable_descriptions.txt"), sep=","
)
var_des_map_values = var_des["VARIABLE_TYPE"].tolist()
for i in range(len(var_des)):
var_des_map_values[i] = var_des_map_values[i].lower()
# sort by type
key_lst = list(set(var_des_map_values))
key_lst.sort(key=var_des_map_values.index)
# remove x_region_names
key_lst.remove("x_region_names")
for key in key_lst:
# in "spreadsheets-in-csv-format" directory, the name of "flow_record" file is conterm_flowrec.txt
if key == "flow_record":
key = "flowrec"
data_file = os.path.join(dir_gage_attr, "conterm_" + key + ".txt")
# remove some unused attrs in bas_classif
if key == "bas_classif":
# https://stackoverflow.com/questions/22216076/unicodedecodeerror-utf8-codec-cant-decode-byte-0xa5-in-position-0-invalid-s
data_temp = pd.read_csv(
data_file,
sep=",",
dtype={"STAID": str},
usecols=range(0, 4),
encoding="unicode_escape",
)
else:
data_temp = pd.read_csv(data_file, sep=",", dtype={"STAID": str})
if key == "flowrec":
# remove final column which is nan
data_temp = data_temp.iloc[:, range(0, data_temp.shape[1] - 1)]
# all attrs in files
var_lst_temp = list(data_temp.columns[1:])
var_dict[key] = var_lst_temp
var_lst.extend(var_lst_temp)
k = 0
n_gage = len(gages_ids)
out_temp = np.full(
[n_gage, len(var_lst_temp)], np.nan
) # 1d:sites,2d: attrs in current data_file
# sites intersection,ind2 is the index of sites in conterm_ files,set them in out_temp
range1 = gages_ids
range2 = data_temp.iloc[:, 0].astype(str).tolist()
assert all(x < y for x, y in zip(range2, range2[1:]))
# Notice the sequence of station ids ! Some id_lst_all are not sorted, so don't use np.intersect1d
ind2 = [range2.index(tmp) for tmp in range1]
for field in var_lst_temp:
if is_string_dtype(data_temp[field]): # str vars -> categorical vars
value, ref = pd.factorize(data_temp.loc[ind2, field], sort=True)
out_temp[:, k] = value
f_dict[field] = ref.tolist()
elif is_numeric_dtype(data_temp[field]):
out_temp[:, k] = data_temp.loc[ind2, field].values
k = k + 1
out_lst.append(out_temp)
out = np.concatenate(out_lst, 1)
return out, var_lst, var_dict, f_dict
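def _factorize_sketch(self):
    # Minimal illustration (hypothetical helper, not used by this class) of the
    # pattern in read_attr_all: string-valued attributes are factorized into
    # integer codes plus a lookup list of the original category labels.
    ser = pd.Series(["Ref", "Non-ref", "Ref"])
    codes, categories = pd.factorize(ser, sort=True)
    # codes -> array([1, 0, 1]); categories.tolist() -> ['Non-ref', 'Ref']
    return codes, categories.tolist()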
def read_constant_cols(
self, object_ids=None, constant_cols: list = None, **kwargs
) -> np.array:
"""
read some attrs of some sites
Parameters
----------
object_ids : [type], optional
sites_ids, by default None
constant_cols : list, optional
attrs' names, by default None
Returns
-------
np.array
attr data for object_ids
"""
# assert all(x < y for x, y in zip(object_ids, object_ids[1:]))
attr_all, var_lst_all, var_dict, f_dict = self.read_attr_all(object_ids)
ind_var = list()
for var in constant_cols:
ind_var.append(var_lst_all.index(var))
out = attr_all[:, ind_var]
return out
def read_attr_origin(self, gages_ids, attr_lst) -> np.ndarray:
"""
this function read the attrs data in GAGES-II but not transform them to int when they are str
Parameters
----------
gages_ids : Union[list, np.ndarray]
    gages sites' ids
attr_lst : list
    attributes' names to read
Returns
-------
np.ndarray
the first dim is types of attrs, and the second one is sites
"""
dir_gage_attr = self.data_source_description["GAGES_ATTR_DIR"]
var_des = pd.read_csv(
os.path.join(dir_gage_attr, "variable_descriptions.txt"), sep=","
)
var_des_map_values = var_des["VARIABLE_TYPE"].tolist()
for i in range(len(var_des)):
var_des_map_values[i] = var_des_map_values[i].lower()
key_lst = list(set(var_des_map_values))
key_lst.sort(key=var_des_map_values.index)
key_lst.remove("x_region_names")
out_lst = []
for i in range(len(attr_lst)):
out_lst.append([])
range1 = gages_ids
gage_id_file = self.data_source_description["GAGES_GAUGE_FILE"]
data_all = pd.read_csv(gage_id_file, sep=",", dtype={0: str})
range2 = data_all["STAID"].values.tolist()
assert all(x < y for x, y in zip(range2, range2[1:]))
# Notice the sequence of station ids ! Some id_lst_all are not sorted, so don't use np.intersect1d
ind2 = [range2.index(tmp) for tmp in range1]
for key in key_lst:
# in "spreadsheets-in-csv-format" directory, the name of "flow_record" file is conterm_flowrec.txt
if key == "flow_record":
key = "flowrec"
data_file = os.path.join(dir_gage_attr, "conterm_" + key + ".txt")
if key == "bas_classif":
data_temp = pd.read_csv(
data_file,
sep=",",
dtype={
"STAID": str,
"WR_REPORT_REMARKS": str,
"ADR_CITATION": str,
"SCREENING_COMMENTS": str,
},
engine="python",
encoding="unicode_escape",
)
elif key == "bound_qa":
# "DRAIN_SQKM" already exists
data_temp = pd.read_csv(
data_file,
sep=",",
dtype={"STAID": str},
usecols=[
"STAID",
"BASIN_BOUNDARY_CONFIDENCE",
"NWIS_DRAIN_SQKM",
"PCT_DIFF_NWIS",
"HUC10_CHECK",
],
)
else:
data_temp = pd.read_csv(data_file, sep=",", dtype={"STAID": str})
if key == "flowrec":
data_temp = data_temp.iloc[:, range(0, data_temp.shape[1] - 1)]
var_lst_temp = list(data_temp.columns[1:])
do_exist, idx_lst = is_any_elem_in_a_lst(
attr_lst, var_lst_temp, return_index=True
)
if do_exist:
for idx in idx_lst:
idx_in_var = (
var_lst_temp.index(attr_lst[idx]) + 1
) # +1 because the first col of data_temp is ID
out_lst[idx] = data_temp.iloc[ind2, idx_in_var].values
else:
continue
out = np.array(out_lst)
return out
def read_forcing_gage(self, usgs_id, var_lst, t_range_list, forcing_type="daymet"):
gage_dict = self.gages_sites
ind = np.argwhere(gage_dict["STAID"] == usgs_id)[0][0]
huc = gage_dict["HUC02"][ind]
data_folder = os.path.join(
self.data_source_description["GAGES_FORCING_DIR"], forcing_type
)
# the original daymet files have no data for 12-31 in leap years, so the
# files that were interpolated to fill those missing values are named "_leap"
data_file = os.path.join(
data_folder, huc, "%s_lump_%s_forcing_leap.txt" % (usgs_id, forcing_type)
)
print("reading", forcing_type, "forcing data ", usgs_id)
data_temp = pd.read_csv(data_file, sep=r"\s+", header=None, skiprows=1)
df_date = data_temp[[0, 1, 2]]
df_date.columns = ["year", "month", "day"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
nf = len(var_lst)
assert all(x < y for x, y in zip(date, date[1:]))
[c, ind1, ind2] = np.intersect1d(date, t_range_list, return_indices=True)
assert date[0] <= t_range_list[0] and date[-1] >= t_range_list[-1]
nt = t_range_list.size
out = np.empty([nt, nf])
var_lst_in_file = [
"dayl(s)",
"prcp(mm/day)",
"srad(W/m2)",
"swe(mm)",
"tmax(C)",
"tmin(C)",
"vp(Pa)",
]
for k in range(nf):
# assume all files are of same columns. May check later.
ind = [
i
for i in range(len(var_lst_in_file))
if var_lst[k] in var_lst_in_file[i]
][0]
out[ind2, k] = data_temp[ind + 4].values[ind1]
return out
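def _date_alignment_sketch(self):
    # Illustration (hypothetical helper, not used by this class) of the
    # np.intersect1d alignment in read_forcing_gage: ind1 indexes the dates
    # found in the file, ind2 indexes the requested range, so the values land
    # in the right rows of the output while missing days stay NaN.
    file_dates = np.array(["2000-01-01", "2000-01-02", "2000-01-03"],
                          dtype="datetime64[D]")
    wanted = np.array(["2000-01-02", "2000-01-03", "2000-01-04"],
                      dtype="datetime64[D]")
    values_in_file = np.array([1.0, 2.0, 3.0])
    _, ind1, ind2 = np.intersect1d(file_dates, wanted, return_indices=True)
    out = np.full(wanted.size, np.nan)
    out[ind2] = values_in_file[ind1]
    return out  # -> array([2., 3., nan])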
def read_relevant_cols(
self, object_ids=None, t_range_list=None, var_lst=None, **kwargs
) -> np.array:
assert all(x < y for x, y in zip(object_ids, object_ids[1:]))
assert all(x < y for x, y in zip(t_range_list, t_range_list[1:]))
print("reading formatted data:")
t_lst = hydro_utils.t_range_days(t_range_list)
nt = t_lst.shape[0]
x = np.empty([len(object_ids), nt, len(var_lst)])
for k in range(len(object_ids)):
data = self.read_forcing_gage(
object_ids[k],
var_lst,
t_lst,
forcing_type=self.data_source_description["GAGES_FORCING_TYPE"][0],
)
x[k, :, :] = data
return x
def read_target_cols(
self, usgs_id_lst=None, t_range_list=None, target_cols=None, **kwargs
) -> np.array:
"""
Read USGS daily average streamflow data according to id and time
Parameters
----------
usgs_id_lst
site information
t_range_list
must be time range for downloaded data
target_cols
kwargs
optional
Returns
-------
np.array
streamflow data, 1d-axis: gages, 2d-axis: day, 3d-axis: streamflow
"""
t_lst = hydro_utils.t_range_days(t_range_list)
nt = t_lst.shape[0]
y = np.empty([len(usgs_id_lst), nt, 1])
for k in range(len(usgs_id_lst)):
data_obs = self.read_usgs_gage(usgs_id_lst[k], t_lst)
y[k, :, 0] = data_obs
return y
def read_usgs_gage(self, usgs_id, t_lst):
"""
read data for one gage
Parameters
----------
        usgs_id : str
            the id of the USGS gauge to read
        t_lst : np.array
            the dates (datetime64[D]) for which streamflow is requested
        Returns
        -------
        np.array
            1d array of daily streamflow; days without valid observations are NaN
"""
print(usgs_id)
dir_gage_flow = self.data_source_description["GAGES_FLOW_DIR"]
gage_id_df = pd.DataFrame(self.gages_sites)
huc = gage_id_df[gage_id_df["STAID"] == usgs_id]["HUC02"].values[0]
usgs_file = os.path.join(dir_gage_flow, str(huc), usgs_id + ".txt")
# ignore the comment lines and the first non-value row
df_flow = pd.read_csv(
usgs_file, comment="#", sep="\t", dtype={"site_no": str}
).iloc[1:, :]
# change the original column names
columns_names = df_flow.columns.tolist()
columns_flow = []
columns_flow_cd = []
for column_name in columns_names:
            # 00060 is the NWIS parameter code for "discharge"; 00003 is the statistic code for "mean value"
            # one special case: 126801 00060 00003 Discharge, cubic feet per second (Mean) and
            # 126805 00060 00003 Discharge, cubic feet per second (Mean), PUBLISHED
            # are both mean values; here we choose the column with more (non-missing) records
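            # e.g. a column named "126801_00060_00003" holds the discharge values, while
            # "126801_00060_00003_cd" holds the corresponding qualification codes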
if "_00060_00003" in column_name and "_00060_00003_cd" not in column_name:
columns_flow.append(column_name)
for column_name in columns_names:
if "_00060_00003_cd" in column_name:
columns_flow_cd.append(column_name)
if len(columns_flow) > 1:
print("there are some columns for flow, choose one\n")
df_date_temp = df_flow["datetime"]
date_temp = pd.to_datetime(df_date_temp).values.astype("datetime64[D]")
c_temp, ind1_temp, ind2_temp = np.intersect1d(
date_temp, t_lst, return_indices=True
)
num_nan_lst = []
for i in range(len(columns_flow)):
out_temp = np.full([len(t_lst)], np.nan)
df_flow.loc[df_flow[columns_flow[i]] == "Ice", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Ssn", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Tst", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Eqp", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Rat", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Dis", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Bkw", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "***", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "Mnt", columns_flow[i]] = np.nan
df_flow.loc[df_flow[columns_flow[i]] == "ZFL", columns_flow[i]] = np.nan
df_flow_temp = df_flow[columns_flow[i]].copy()
out_temp[ind2_temp] = df_flow_temp[ind1_temp]
num_nan = np.isnan(out_temp).sum()
num_nan_lst.append(num_nan)
num_nan_np = np.array(num_nan_lst)
index_flow_num = np.argmin(num_nan_np)
df_flow.rename(columns={columns_flow[index_flow_num]: "flow"}, inplace=True)
df_flow.rename(
columns={columns_flow_cd[index_flow_num]: "mode"}, inplace=True
)
else:
for column_name in columns_names:
if (
"_00060_00003" in column_name
and "_00060_00003_cd" not in column_name
):
df_flow.rename(columns={column_name: "flow"}, inplace=True)
break
for column_name in columns_names:
if "_00060_00003_cd" in column_name:
df_flow.rename(columns={column_name: "mode"}, inplace=True)
break
columns = ["agency_cd", "site_no", "datetime", "flow", "mode"]
if df_flow.empty:
df_flow = pd.DataFrame(columns=columns)
if not ("flow" in df_flow.columns.intersection(columns)):
data_temp = df_flow.loc[:, df_flow.columns.intersection(columns)]
# add nan column to data_temp
data_temp = pd.concat([data_temp, pd.DataFrame(columns=["flow", "mode"])])
else:
data_temp = df_flow.loc[:, columns]
# fix flow which is not numeric data
data_temp.loc[data_temp["flow"] == "Ice", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Ssn", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Tst", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Eqp", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Rat", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Dis", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Bkw", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "***", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "Mnt", "flow"] = np.nan
data_temp.loc[data_temp["flow"] == "ZFL", "flow"] = np.nan
# set negative value -- nan
obs = data_temp["flow"].astype("float").values
obs[obs < 0] = np.nan
# time range intersection. set points without data nan values
nt = len(t_lst)
out = np.full([nt], np.nan)
# date in df is str,so transform them to datetime
df_date = data_temp["datetime"]
date = pd.to_datetime(df_date).values.astype("datetime64[D]")
c, ind1, ind2 = np.intersect1d(date, t_lst, return_indices=True)
out[ind2] = obs[ind1]
return out
def read_object_ids(self, object_params=None) -> np.array:
return self.gages_sites["STAID"]
def read_basin_area(self, object_ids) -> np.array:
return self.read_constant_cols(object_ids, ["DRAIN_SQKM"], is_return_dict=False)
def read_mean_prep(self, object_ids) -> np.array:
mean_prep = self.read_constant_cols(
object_ids, ["PPTAVG_BASIN"], is_return_dict=False
)
mean_prep = mean_prep / 365 * 10
return mean_prep
def download_data_source(self):
print("Please download data manually!")
if not os.path.isdir(self.data_source_description["GAGES_DIR"]):
os.makedirs(self.data_source_description["GAGES_DIR"])
zip_files = [
"59692a64e4b0d1f9f05fbd39",
"basin_mean_forcing.zip",
"basinchar_and_report_sept_2011.zip",
"boundaries_shapefiles_by_aggeco.zip",
"gages_streamflow.zip",
"gagesII_9322_point_shapefile.zip",
]
download_zip_files = [
os.path.join(self.data_source_description["GAGES_DIR"], zip_file)
for zip_file in zip_files
]
for download_zip_file in download_zip_files:
if not os.path.isfile(download_zip_file):
raise RuntimeError(
download_zip_file + " not found! Please download the data"
)
unzip_dirs = [
os.path.join(self.data_source_description["GAGES_DIR"], zip_file[:-4])
for zip_file in zip_files
]
for i in range(len(unzip_dirs)):
if not os.path.isdir(unzip_dirs[i]):
print("unzip directory:" + unzip_dirs[i])
unzip_nested_zip(download_zip_files[i], unzip_dirs[i])
else:
print("unzip directory -- " + unzip_dirs[i] + " has existed")
def read_site_info(self):
gage_id_file = self.data_source_description["GAGES_GAUGE_FILE"]
data_all = pd.read_csv(gage_id_file, sep=",", dtype={0: str})
gage_fld_lst = data_all.columns.values
out = dict()
df_id_region = data_all.iloc[:, 0].values
assert all(x < y for x, y in zip(df_id_region, df_id_region[1:]))
for s in gage_fld_lst:
            if s == gage_fld_lst[1]:  # compare by value rather than identity
out[s] = data_all[s].values.tolist()
else:
out[s] = data_all[s].values
return out
def prepare_usgs_data(
data_source_description: Dict, t_download_range: Union[tuple, list]
):
    hydro_logger.info("Not all data sources can be downloaded directly from the website!")
# download zip files
[
download_one_zip(attr_url, data_source_description["GAGES_DIR"])
for attr_url in data_source_description["GAGES_DOWNLOAD_URL_LST"]
]
# download streamflow data from USGS website
dir_gage_flow = data_source_description["GAGES_FLOW_DIR"]
streamflow_url = data_source_description["USGS_FLOW_URL"]
if not os.path.isdir(dir_gage_flow):
os.makedirs(dir_gage_flow)
dir_list = os.listdir(dir_gage_flow)
# if no streamflow data for the usgs_id_lst, then download them from the USGS website
data_all = pd.read_csv(
data_source_description["GAGES_GAUGE_FILE"], sep=",", dtype={0: str}
)
usgs_id_lst = data_all.iloc[:, 0].values.tolist()
gage_fld_lst = data_all.columns.values
for ind in range(len(usgs_id_lst)): # different hucs different directories
huc_02 = data_all[gage_fld_lst[3]][ind]
dir_huc_02 = str(huc_02)
if dir_huc_02 not in dir_list:
dir_huc_02 = os.path.join(dir_gage_flow, str(huc_02))
os.mkdir(dir_huc_02)
dir_list = os.listdir(dir_gage_flow)
dir_huc_02 = os.path.join(dir_gage_flow, str(huc_02))
file_list = os.listdir(dir_huc_02)
file_usgs_id = str(usgs_id_lst[ind]) + ".txt"
if file_usgs_id not in file_list:
# download data and save as txt file
start_time_str = datetime.strptime(t_download_range[0], "%Y-%m-%d")
end_time_str = datetime.strptime(
t_download_range[1], "%Y-%m-%d"
) - timedelta(days=1)
url = streamflow_url.format(
usgs_id_lst[ind],
start_time_str.year,
start_time_str.month,
start_time_str.day,
end_time_str.year,
end_time_str.month,
end_time_str.day,
)
# save in its HUC02 dir
temp_file = os.path.join(dir_huc_02, str(usgs_id_lst[ind]) + ".txt")
download_small_file(url, temp_file)
print("successfully download " + temp_file + " streamflow data!")
def get_dor_values(gages: Gages, usgs_id) -> np.array:
"""
get dor values from gages for the usgs_id-sites
"""
assert all(x < y for x, y in zip(usgs_id, usgs_id[1:]))
# mm/year 1-km grid, megaliters total storage per sq km (1 megaliters = 1,000,000 liters = 1,000 cubic meters)
# attr_lst = ["RUNAVE7100", "STOR_NID_2009"]
attr_lst = ["RUNAVE7100", "STOR_NOR_2009"]
data_attr = gages.read_constant_cols(usgs_id, attr_lst)
run_avg = data_attr[:, 0] * (10 ** (-3)) * (10 ** 6) # m^3 per year
nor_storage = data_attr[:, 1] * 1000 # m^3
dors = nor_storage / run_avg
return dors
def get_diversion(gages: Gages, usgs_id) -> np.array:
diversion_strs = ["diversion", "divert"]
assert all(x < y for x, y in zip(usgs_id, usgs_id[1:]))
attr_lst = ["WR_REPORT_REMARKS", "SCREENING_COMMENTS"]
data_attr = gages.read_attr_origin(usgs_id, attr_lst)
diversion_strs_lower = [elem.lower() for elem in diversion_strs]
data_attr0_lower = np.array(
[elem.lower() if type(elem) == str else elem for elem in data_attr[0]]
)
data_attr1_lower = np.array(
[elem.lower() if type(elem) == str else elem for elem in data_attr[1]]
)
data_attr_lower = np.vstack((data_attr0_lower, data_attr1_lower)).T
diversions = [
is_any_elem_in_a_lst(diversion_strs_lower, data_attr_lower[i], include=True)
for i in range(len(usgs_id))
]
return np.array(diversions)
def read_usgs_daily_flow(
usgs_site_ids: list,
date_tuple: tuple,
gage_dict: dict,
save_dir: str,
unit: str = "cfs",
) -> pd.DataFrame:
"""
Read USGS flow data by HyRivers' pygeohydro tool.
The tool's tutorial: https://github.com/cheginit/HyRiver-examples/blob/main/notebooks/nwis.ipynb
Parameters
----------
usgs_site_ids
ids of USGS sites
date_tuple
start and end date
gage_dict
a dict containing gage's ids and the correspond HUC02 ids
save_dir
where we save streamflow data in files like CAMELS
unit
unit of streamflow, cms or cfs
Returns
-------
pd.DataFrame
streamflow data -- index is date; column is gage id
"""
from pygeohydro import NWIS
nwis = NWIS()
qobs = nwis.get_streamflow(usgs_site_ids, date_tuple, mmd=False)
# the unit of qobs is cms, but unit in CAMELS and GAGES-II is cfs, so here we transform it
# use round(2) because in both CAMELS and GAGES-II, data with cfs only have two float digits
if unit == "cfs":
qobs = (qobs * 35.314666212661).round(2)
dates = qobs.index
camels_format_index = ["GAGE_ID", "Year", "Mnth", "Day", "streamflow(" + unit + ")"]
year_month_day = pd.DataFrame(
[[dt.year, dt.month, dt.day] for dt in dates], columns=camels_format_index[1:4]
)
if "STAID" in gage_dict.keys():
gage_id_key = "STAID"
elif "gauge_id" in gage_dict.keys():
gage_id_key = "gauge_id"
elif "gage_id" in gage_dict.keys():
gage_id_key = "gage_id"
else:
raise NotImplementedError("No such gage id name")
if "HUC02" in gage_dict.keys():
huc02_key = "HUC02"
elif "huc_02" in gage_dict.keys():
huc02_key = "huc_02"
else:
raise NotImplementedError("No such huc02 id")
read_sites = [col[5:] for col in qobs.columns.values]
for site_id in usgs_site_ids:
if site_id not in read_sites:
df_flow = pd.DataFrame(
np.full(qobs.shape[0], np.nan), columns=camels_format_index[4:5]
)
else:
qobs_i = qobs["USGS-" + site_id]
df_flow = pd.DataFrame(qobs_i.values, columns=camels_format_index[4:5])
df_id = pd.DataFrame(
np.full(qobs.shape[0], site_id), columns=camels_format_index[0:1]
)
new_data_df = pd.concat([df_id, year_month_day, df_flow], axis=1)
# output the result
i_basin = gage_dict[gage_id_key].values.tolist().index(site_id)
huc_id = gage_dict[huc02_key][i_basin]
output_huc_dir = os.path.join(save_dir, huc_id)
if not os.path.isdir(output_huc_dir):
os.makedirs(output_huc_dir)
output_file = os.path.join(output_huc_dir, site_id + "_streamflow_qc.txt")
if os.path.isfile(output_file):
os.remove(output_file)
new_data_df.to_csv(
output_file, header=True, index=False, sep=",", float_format="%.2f"
)
return qobs
# TODO: the following functions are not tested yet. They may be useful when handling hourly data
def make_usgs_data(
start_date: datetime, end_date: datetime, site_number: str
) -> pd.DataFrame:
"""This method could also be used to download usgs streamflow data"""
base_url = (
"https://nwis.waterdata.usgs.gov/usa/nwis/uv/?cb_00060=on&cb_00065&format=rdb&"
)
full_url = (
base_url
+ "site_no="
+ site_number
+ "&period=&begin_date="
+ start_date.strftime("%Y-%m-%d")
+ "&end_date="
+ end_date.strftime("%Y-%m-%d")
)
print("Getting request from USGS")
print(full_url)
r = requests.get(full_url)
with open(site_number + ".txt", "w") as f:
f.write(r.text)
print("Request finished")
response_data = process_response_text(site_number + ".txt")
create_csv(response_data[0], response_data[1], site_number)
return pd.read_csv(site_number + "_flow_data.csv")
def process_response_text(file_name: str) -> Tuple[str, Dict]:
extractive_params = {}
with open(file_name, "r") as f:
lines = f.readlines()
i = 0
params = False
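        # Scan the leading '#' comment block of the USGS RDB file; once the "TS" header line
        # is reached, the following lines describe each time series and are stored in
        # extractive_params as "<TS id>_<parameter code>" -> label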
while "#" in lines[i]:
            # TODO figure out getting height and discharge code efficiently
the_split_line = lines[i].split()[1:]
if params:
print(the_split_line)
if len(the_split_line) < 2:
params = False
else:
extractive_params[
the_split_line[0] + "_" + the_split_line[1]
] = df_label(the_split_line[2])
if len(the_split_line) > 2:
if the_split_line[0] == "TS":
params = True
i += 1
with open(file_name.split(".")[0] + "data.tsv", "w") as t:
t.write("".join(lines[i:]))
return file_name.split(".")[0] + "data.tsv", extractive_params
def df_label(usgs_text: str) -> str:
usgs_text = usgs_text.replace(",", "")
if usgs_text == "Discharge":
return "cfs"
elif usgs_text == "Gage":
return "height"
else:
return usgs_text
def create_csv(file_path: str, params_names: dict, site_number: str):
"""
Function that creates the final version of the CSV files
Parameters
----------
    file_path : str
        path to the tab-separated data file produced by process_response_text
    params_names : dict
        mapping of "<TS id>_<parameter code>" column ids to readable labels
    site_number : str
        the USGS site number, used to name the output CSV
"""
    df = pd.read_csv(file_path, sep="\t")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('seaborn-notebook')
plt.rcParams['figure.figsize'] = (14, 12)
REGION_ID = 3034
# Load data for new snow line calculated in *new_snow_line.py*
nsl = pd.read_csv(r"C:\Users\kmu\PycharmProjects\APS\aps\scripts\tmp\new_snow_line_{0}_20201201_20210531.csv".format(REGION_ID), sep=";", parse_dates=['Date'])
nsl['Altitude_nsl'] = np.clip(nsl['Altitude'], a_min=0, a_max=None)
# group by day and keep only the highest value of 00, 06, 12, or 18 o'clock
nsl_gr = nsl.groupby(by='Date', as_index=False).max()
# Load data for new snow line from APS db
# Used for extraction: exec GetTimeSerieData @RegionId='3034.0', @parameter='2014', @FromDate='2020-12-01', @ToDate='2021-05-31', @Model='met_obs_v2.0'
db = pd.read_csv(r"C:\Users\kmu\PycharmProjects\APS\aps\scripts\tmp\{0}_newsnowline2021.csv".format(REGION_ID), sep=";", parse_dates=['Time'])
db['Altitude_wetB'] = np.clip(db['Value'], a_min=0, a_max=None)
db['Date'] = db['Time'].apply(lambda x: pd.Timestamp(x.date()))
db['Hour'] = db['Time'].apply(lambda x: x.hour)
# group by day and keep only the highest value of 00, 06, 12, or 18 o'clock
db_gr = db.groupby(by='Date', as_index=False).max()
# Load data for 0-isotherm from APS db
db_0iso = pd.read_csv(r"C:\Users\kmu\PycharmProjects\APS\aps\scripts\tmp\{0}_0isoterm2021.csv".format(REGION_ID), sep=";", parse_dates=['Time'])
db_0iso['Altitude_0iso'] = np.clip(db_0iso['Value'], a_min=0, a_max=None)
db_0iso['Date'] = db_0iso['Time'].apply(lambda x: pd.Timestamp(x.date()))
db_0iso['Hour'] = db_0iso['Time'].apply(lambda x: x.hour)
# group by day and keep only the highest value of 00, 06, 12, or 18 o'clock
db_0iso_gr = db_0iso.groupby(by='Date', as_index=False).max()
# Load varsom-data containing published mountain-weather
aw2 = pd.read_csv(r"C:\Users\kmu\PycharmProjects\varsomdata\varsomdata\{0}_forecasts_20_21.csv".format(REGION_ID), sep=";", header=0, index_col=0, parse_dates=['valid_from', 'date_valid'])
aw2['Date'] = aw2['date_valid']
_merged = pd.merge(nsl_gr, db_gr, how='left', on='Date', suffixes=['_nsl', '_wetB'])
_merged2 = pd.merge(_merged, db_0iso_gr, how='left', on='Date', suffixes=['_nsl', '_APSwetB'])
_merged3 = pd.merge(_merged2, aw2, how='left', on='Date', suffixes=['_nsl', '_APS0iso'])
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class herited by most other class. Includes handy helper functions
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
return '_'.join(parameters.values())
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
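        # Nonparametric bootstrap: resample the (y, pred) pairs with replacement and recompute
        # the metric n_bootstrap_iterations times; returns the mean and std of the estimates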
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformating the rows and columns
- splitting the dataset into folds for the future cross validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formating the demographics columns (age, sex and ethnicity)
- reformating the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'][self.data_raw['Sex_genetic'].isna()] = \
self.data_raw['Sex'][self.data_raw['Sex_genetic'].isna()]
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0']
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
class PreprocessingImagesIDs(Basics):
"""
Splits the different images datasets into folds for the future cross validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (most medical images, mostly medical images)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: The i's data fold is used as *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
                # for paired organs, take the union of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
            data = data[data['Data_quality'].notna()]  # note: the original '!= np.nan' comparison never filtered anything
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
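            # e.g. for outer_fold '0': fold '0' is used for validation, fold '1' for testing,
            # and the remaining eight folds for training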
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
    Mirrors the PreprocessingMain class, but computes the Death and FollowUpTime variables for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
        self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
        d2 = [Timestamp(x)
# The xrayvis bokeh app
import os
import numpy as np
import pandas as pd
import requests
import yaml
from tempfile import TemporaryDirectory, NamedTemporaryFile
from base64 import b64decode
import parselmouth
from bokeh_phon.utils import remote_jupyter_proxy_url_callback, set_default_jupyter_url
from bokeh_phon.models.audio_button import AudioButton
from phonlab.array import nonzero_groups
from bokeh.core.query import find
from bokeh.plotting import figure
from bokeh.colors import RGB
from bokeh.models import BoxAnnotation, BoxSelectTool, BoxZoomTool, Button, Circle, \
ColumnDataSource, CrosshairTool, Div, FileInput, HoverTool, LinearColorMapper, \
LogColorMapper, MultiLine, MultiSelect, RadioButtonGroup, Range1d, RangeSlider, \
Select, Slider, Span, Spinner, PanTool, ResetTool, TapTool, \
WheelZoomTool, ZoomInTool, ZoomOutTool
from bokeh.models.widgets import DataTable, NumberFormatter, Panel, Tabs, TableColumn
from bokeh.io import show, output_notebook, push_notebook
from bokeh.layouts import column, gridplot, row
from bokeh.events import MouseMove, SelectionGeometry, Tap
from bokeh.transform import linear_cmap
from bokeh.palettes import Greens, Greys, Greys256, Reds
r_Greens9 = list(reversed(Greens[9]))
r_Greys9 = list(reversed(Greys[9]))
r_Greys256 = list(reversed(Greys256))
r_Reds9 = list(reversed(Reds[9]))
# The remote_jupyter_proxy_url function is required when running on a BinderHub instance.
# Use the set_default_jupyter_url function to set the hostname of your instance after it has
# started. The current value is the most frequent result when launching from mybinder.org.
# Change to another value if running Binder host is not this most common one:
# set_default_jupyter_url('https://datahub.berkeley.edu/')
# Or if you are running locally (on localhost), ignore the previous line and set
# `local_notebook` to True:
# local_notebook = False
output_notebook()
# bad values in .txy files are 1000000 (scaled to 1000)
# TODO:
# when bokeh can handle plots with NaN, use that to filter instead of badval
badval = 1000
params = {
'low_thresh_color': 'white',
'low_thresh_power': 3.5,
'window_size': 5.0,
'spslice_lastx': 0.0,
'downsample_rate': 20000
}
snd = None
wavname = None
dfs = {}
# Info for loading/caching audio files
tempdirobj = TemporaryDirectory()
tempdir = tempdirobj.name
resource_url = 'https://linguistics.berkeley.edu/phonapps/resource/'
manifest_name = 'manifest.yaml'
manifest_key = 'resources'
timesource = ColumnDataSource(
{
'T1x': [], 'T1y': [], 'T2x': [], 'T2y': [],
'T3x': [], 'T3y': [], 'T4x': [], 'T4y': [],
'ULx': [], 'ULy': [], 'LLx': [], 'LLy': [],
'MIx': [], 'MIy': [], 'MMx': [], 'MMy': [],
'color': [], 'sec': []
}
)
allwddf = pd.read_feather('all_words.feather')
allphdf = pd.read_feather('all_phones.feather')
fileopt_demo_dtypes = {
'speaker': 'category', 'wavname': 'category', 'uttid': 'category', 'rep': 'category',
'bytes': np.int16,
'subject': 'category', 'sex': 'category', 'dialect_base_state': 'category', 'dialect_base_city': 'category'
}
fileoptsdf = pd.read_csv('file_opts.csv', dtype=fileopt_demo_dtypes)
demo = pd.merge(
pd.read_csv('speaker_demographics1.csv', dtype=fileopt_demo_dtypes),
pd.read_csv('speaker_demographics2.csv', dtype=fileopt_demo_dtypes),
on='subject'
)
# Remove subjects that are not in the transcribed speaker set.
demo = demo[demo.subject.isin(allphdf.speaker.cat.categories)]
def snd2specgram(snd, winsize, pre_emphasize=True):
'''Return a spectrogram created from snd.'''
if pre_emphasize is True:
specsnd = snd.copy()
specsnd.pre_emphasize()
else:
specsnd = snd
return specsnd.to_spectrogram(window_length=winsize, maximum_frequency=params['downsample_rate']/2)
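# Hedged usage sketch (not executed; 'example.wav' is a placeholder filename):
# snd = parselmouth.Sound('example.wav')
# sgram = snd2specgram(snd, winsize=0.005)
# sgram.values is the 2-D array of power values that the app plots below.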
def get_cached_fname(fname, base_url, speaker):
'''Get filename from tempdir, or base_url and fname if not already in cache.'''
# Load from e.g. https://linguistics.berkeley.edu/phonapps/xray_microbeam_database/JW11/tp001.txy
tempspkrdir = os.path.join(tempdir, speaker)
os.makedirs(tempspkrdir, exist_ok=True)
cachefile = os.path.join(tempspkrdir, fname)
if not os.path.isfile(cachefile):
if not base_url.endswith('/'):
base_url += '/'
# Get .wav
r = requests.get(base_url + fname)
tempwav = os.path.join(tempspkrdir, 'temp.wav')
with open(tempwav, 'wb') as twav:
twav.write(r.content)
tempsnd = parselmouth.Sound(tempwav)
ds_rate = params['downsample_rate']
if tempsnd.end_time >= 15.0:
ds_rate = int(ds_rate / 2)
ds_snd = tempsnd.resample(ds_rate, 50)
ds_snd.save(cachefile, parselmouth.SoundFileFormat.WAV)
# Get .txy
txyfile = cachefile.replace('.wav', '.txy')
r = requests.get(base_url + fname.replace('.wav', '.txy'))
with open(txyfile, 'wb') as tfile:
tfile.write(r.content)
for f in ('PAL.DAT', 'PHA.DAT'):
landmarkfile = os.path.join(tempspkrdir, f)
if not os.path.isfile(landmarkfile):
r = requests.get(base_url + f)
with open(landmarkfile, 'wb') as tfile:
tfile.write(r.content)
return cachefile
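# Hedged usage sketch: fetch (or reuse) a cached, downsampled copy of one recording.
# The speaker/file names mirror the example URL in the docstring and are illustrative only.
# wav = get_cached_fname(
#     'tp001.wav',
#     'https://linguistics.berkeley.edu/phonapps/xray_microbeam_database/JW11',
#     'JW11'
# )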
def xrayvis_app(doc):
def load_wav_cb(attr, old, new):
'''Handle selection of audio file to be loaded.'''
if new == '':
return
global wavname
global snd
spkr, fname = os.path.split(new)
wavname = get_cached_fname(
fname,
f'https://linguistics.berkeley.edu/phonapps/xray_microbeam_database/{spkr}',
spkr
)
# wavname = new
if not wavname.endswith('.wav'):
return
snd = parselmouth.Sound(wavname)
srcdf = pd.DataFrame(dict(
seconds=snd.ts().astype(np.float32),
ch0=snd.values[0,:].astype(np.float32),
))
#! TODO: do file caching
phdf = allphdf.loc[allphdf.wavpath == new, :].copy()
phdf['t1'] = phdf['t1'].astype(np.float32)
wddf = allwddf.loc[allwddf.wavpath == new, :].copy()
wddf['t1'] = wddf['t1'].astype(np.float32)
uttdiv.text = '<b>Utterance:</b> ' + ' '.join(wddf.word.str.replace('sp', '')).strip()
phwddf = pd.merge_asof(
phdf[['t1', 'phone']],
wddf[['t1', 'word']],
on='t1',
suffixes=['_ph', '_wd']
)
# TODO: separate t1_ph and t1_wd columns
srcdf = pd.merge_asof(srcdf, phwddf, left_on='seconds', right_on='t1')
srcdf[['phone', 'word']] = srcdf[['phone', 'word']].fillna('')
srcdf = srcdf.drop('t1', axis='columns')
dfs['srcdf'] = srcdf
source.data = srcdf
tngsource.data = {'x': [], 'y': []}
othsource.data = {'x': [], 'y': []}
timesource.data = {k: [] for k in timesource.data.keys()}
lasttngtimesource.data = {'x': [], 'y': []}
lastothtimesource.data = {'x': [], 'y': []}
playvisbtn.channels = channels
playvisbtn.disabled = False
playselbtn.channels = channels
playselbtn.disabled = False
playvisbtn.fs = snd.sampling_frequency
playvisbtn.start = snd.start_time
playvisbtn.end = snd.end_time
playselbtn.fs = snd.sampling_frequency
playselbtn.start = 0.0
playselbtn.end = 0.0
selbox.left = 0.0
selbox.right = 0.0
selbox.visible = False
cursor.location = 0.0
cursor.visible = False
ch0.visible = True
update_sgram()
load_artic()
set_limits(0.0, srcdf['seconds'].max())
def load_artic():
'''Load articulation data.'''
trace.title.text = 'Static trace'
traj.title.text = 'Trajectories'
tngfile = os.path.splitext(wavname)[0] + '.txy'
palfile = os.path.join(os.path.dirname(wavname), 'PAL.DAT')
phafile = os.path.join(os.path.dirname(wavname), 'PHA.DAT')
tngdf = pd.read_csv(
tngfile,
sep='\t',
names=[
'sec', 'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]
)
# Convert to seconds
tngdf['sec'] = tngdf['sec'] / 1e6
tngdf = tngdf.set_index(['sec'])
# Convert to mm
tngdf[[
'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]] = tngdf[[
'ULx', 'ULy', 'LLx', 'LLy', 'T1x', 'T1y', 'T2x', 'T2y',
'T3x', 'T3y', 'T4x', 'T4y', 'MIx', 'MIy', 'MMx', 'MMy'
]] * 1e-3
# Find global x/y max/min in this recording to set axis limits.
# Exclude bad values (1000000 in data file; 1000 mm in scaled dataframe).
cmpdf = tngdf[tngdf < badval]
xmax = np.max(
np.max(
cmpdf[['ULx','LLx','T1x', 'T2x', 'T3x', 'T4x', 'MIx', 'MMx']]
)
)
xmin = np.min(
np.min(
cmpdf[['ULx','LLx','T1x', 'T2x', 'T3x', 'T4x', 'MIx', 'MMx']]
)
)
ymax = np.max(
np.max(
cmpdf[['ULy','LLy','T1y', 'T2y', 'T3y', 'T4y', 'MIy', 'MMy']]
)
)
ymin = np.min(
np.min(
cmpdf[['ULy','LLy','T1y', 'T2y', 'T3y', 'T4y', 'MIy', 'MMy']]
)
)
        paldf = pd.read_csv(palfile, sep=r'\s+', header=None, names=['x', 'y'])
paldf = paldf * 1e-3
palsource.data = {'x': paldf['x'], 'y': paldf['y']}
        phadf = pd.read_csv(phafile, sep=r'\s+', header=None, names=['x', 'y'])
phadf = phadf * 1e-3
phasource.data = {'x': phadf['x'], 'y': phadf['y']}
xmin = np.min([xmin, np.min(paldf['x']), np.min(phadf['x'])])
xmax = np.max([xmax, np.max(paldf['x']), np.max(phadf['x'])])
ymin = np.min([ymin, np.min(paldf['y']), np.min(phadf['y'])])
ymax = np.max([ymax, np.max(paldf['y']), np.max(phadf['y'])])
xsz = xmax - xmin
ysz = ymax - ymin
xrng = [xmin - (xsz * 0.05), xmax + (xsz * 0.05)]
yrng = [ymin - (ysz * 0.05), ymax + (ysz * 0.05)]
dfs['tngdf'] = tngdf
dfs['paldf'] = paldf
dfs['phadf'] = phadf
def update_sgram():
'''Update spectrogram based on current values.'''
if snd.end_time < 15:
sgrams[0] = snd2specgram(snd, 0.005)
specsource.data = dict(
sgram0=[sgrams[0].values.astype(np.float32)]
)
spec0img.glyph.dw = sgrams[0].x_grid().max()
spec0img.glyph.dh = sgrams[0].y_grid().max()
spec0cmap.low = _low_thresh()
spec0.visible = True
else:
specsource.data = dict(
sgram0=[]
)
spec0.visible = False
def update_trace():
'''Update the static trace at the cursor time.'''
trace.title.text = f'Static trace ({cursor.location:0.4f})'
tidx = dfs['tngdf'].index.get_loc(cursor.location, method='nearest')
row = dfs['tngdf'].iloc[tidx]
tngsource.data = {
'x': [row.T1x, row.T2x, row.T3x, row.T4x],
'y': [row.T1y, row.T2y, row.T3y, row.T4y]
}
othsource.data = {
'x': [row.ULx, row.LLx, row.MIx, row.MMx],
'y': [row.ULy, row.LLy, row.MIy, row.MMy]
}
def update_traj():
'''Update the trajectories during the selected time range.'''
traj.title.text = f'Trajectories ({selbox.left:0.4f} - {selbox.right:0.4f})'
seldf = dfs['tngdf'].loc[
(dfs['tngdf'].index >= selbox.left) & (dfs['tngdf'].index <= selbox.right)
]
dfs['seldf'] = seldf
pts = (
'T1x', 'T1y', 'T2x', 'T2y', 'T3x', 'T3y', 'T4x', 'T4y',
'ULx', 'ULy', 'LLx', 'LLy', 'MIx', 'MIy', 'MMx', 'MMy'
)
# Create a list of line segments for each tracked element.
newdata = {
pt: list(np.squeeze(np.dstack((seldf[pt].iloc[:-1], seldf[pt].iloc[1:])))) \
for pt in pts
}
newdata['color'] = np.arange(1, len(seldf))
newdata['sec'] = seldf.index[1:]
timesource.data = newdata
anim_slider.start = seldf.index[0]
anim_slider.end = seldf.index[-1]
anim_slider.step = np.diff(newdata['sec']).mean()
anim_slider.value = anim_slider.end
anim_slider.disabled = False
anim_btn.disabled = False
lastrow = seldf.iloc[-1]
lasttngtimesource.data = {
'x': [lastrow.T1x, lastrow.T2x, lastrow.T3x, lastrow.T4x],
'y': [lastrow.T1y, lastrow.T2y, lastrow.T3y, lastrow.T4y]
}
lastothtimesource.data = {
'x': [lastrow.ULx, lastrow.LLx, lastrow.MIx, lastrow.MMx],
'y': [lastrow.ULy, lastrow.LLy, lastrow.MIy, lastrow.MMy]
}
# TODO: this is a workaround until we can set x_range, y_range directly
# See https://github.com/bokeh/bokeh/issues/4014
def set_limits(xstart, xend):
'''Set axis limits.'''
ch0.x_range.start = xstart
ch0.x_range.end = xend
ch0.axis[0].bounds = (xstart, xend)
def update_select_widgets(clicked_x=None):
'''Update widgets based on current selection. Use the clicked_x param to
designate the cursor location if this function is called as the result of
a Tap event. If clicked_x is None, then use the existing cursor location
to set the center of the selection.'''
mode = selmodebtn.labels[selmodebtn.active]
if clicked_x is None and cursor.visible:
x_loc = cursor.location
elif clicked_x is not None:
x_loc = clicked_x
else:
return
if mode == '200ms':
start = x_loc - 0.100
end = x_loc + 0.100
cursor.location = x_loc
else: # 'word' or 'phone'
idx = np.abs(source.data['seconds'] - x_loc).argmin()
# TODO: clean up the redundancy
fld = {'word': 'word', 'phone': 'phone'}[mode]
label = source.data[fld][idx]
indexes = nonzero_groups(source.data[fld]==label, include_any=idx)
secs = source.data['seconds'][indexes]
start = secs.min()
end = secs.max()
cursor.location = secs.mean()
playselbtn.start = start
playselbtn.end = end
selbox.left = start
selbox.right = end
selbox.visible = True
cursor.visible = True
def spkr_select_cb(attr, old, new):
'''Respond to changes in speaker multiselect.'''
try:
spkrs = demo[
(demo.sex.isin(sex_select.value) \
& demo.dialect_base_state.isin(state_select.value) \
& (demo.dialect_base_city.isin(city_select.value)))
].subject.unique()
new_opts = [''] + [
(f.value, f.label) for f in fileoptsdf[fileoptsdf.speaker.isin(spkrs)].itertuples()
]
fselect.options = new_opts
fselect.value = ''
except NameError as e:
pass # Values not set yet, so ignore
def cursor_cb(e):
'''Handle cursor mouse click in the waveform.'''
update_select_widgets(clicked_x=e.x)
update_trace()
update_traj()
def x_range_cb(attr, old, new):
'''Handle change of x range in waveform/spectrogram.'''
if attr == 'start':
playvisbtn.start = new
elif attr == 'end':
playvisbtn.end = new
def selection_cb(e):
'''Handle data range selection event.'''
#! TODO: handle situation in which selection is too short, i.e. len(seldf) <= 1
cursor.location = (e.geometry['x0'] + e.geometry['x1']) / 2
cursor.visible = True
playselbtn.start = e.geometry['x0']
playselbtn.end = e.geometry['x1']
selbox.left = e.geometry['x0']
selbox.right = e.geometry['x1']
selbox.visible = True
update_trace()
update_traj()
def selmode_cb(attr, old, new):
'''Handle change in click selection value.'''
update_select_widgets(clicked_x=None)
def anim_cb(attr, old, new):
'''Handle change in the animation slider.'''
idx = np.argmin(np.abs(timesource.data['sec'] - new))
n = len(timesource.data['color'])
active = np.arange(n - idx, n + 1)
timesource.data['color'] = np.pad(
active, (0, n - len(active)), constant_values=0
)
anim_cmap = LinearColorMapper(palette=r_Greys256, low=1, high=n+1, low_color='white')
for tag, palette in (('anim_tng', r_Reds9), ('anim_oth', r_Greens9)):
for a in find(traj.references(), {'tags': tag}):
a.line_color = linear_cmap('color', palette, low=1, high=n+1, low_color='white')
lasttngtimesource.data = {
'x': [timesource.data[pt][idx][1] for pt in ('T1x', 'T2x', 'T3x', 'T4x')],
'y': [timesource.data[pt][idx][1] for pt in ('T1y', 'T2y', 'T3y', 'T4y')]
}
lastothtimesource.data = {
'x': [timesource.data[pt][idx][1] for pt in ('ULx', 'LLx', 'MIx', 'MMx')],
'y': [timesource.data[pt][idx][1] for pt in ('ULy', 'LLy', 'MIy', 'MMy')]
}
def anim_btn_cb():
        '''Handle a click of anim_btn: animate trajectories of the selected audio.'''
values = np.linspace(anim_slider.start, anim_slider.end, len(timesource.data['T1x']))
for v in values:
anim_slider.value = v
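    # Descriptive note (assumption about Bokeh server behavior): assigning
    # anim_slider.value in a tight loop inside a callback usually renders only the
    # final state, because property changes are pushed to the browser after the
    # callback returns; smooth animation would need doc.add_next_tick_callback or
    # a periodic callback instead (not implemented here).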
def low_thresh_cb(attr, old, new):
'''Handle change in threshold slider to fade out low spectrogram values.'''
params['low_thresh_power'] = new
lt = _low_thresh()
spec0cmap.low = lt
def _low_thresh():
return sgrams[0].values.min() \
+ sgrams[0].values.std()**params['low_thresh_power']
step = None
rate = orig_rate = None
# dfs = {}
xrng = []
yrng = []
width = 1000
height = 200
cutoff = 50
order = 3
tngcolor = 'DarkRed'
othcolor = 'Indigo'
fselect = Select(options=[], value='')
fselect.on_change('value', load_wav_cb)
sex_select = MultiSelect(
options=[('F', 'female'), ('M', 'male')],
value=['F', 'M']
)
state_select = MultiSelect(
options=list(demo.dialect_base_state.cat.categories),
value=list(demo.dialect_base_state.cat.categories)
)
city_select = MultiSelect(
options=list(demo.dialect_base_city.cat.categories),
value=list(demo.dialect_base_city.cat.categories)
)
sex_select.on_change('value', spkr_select_cb)
state_select.on_change('value', spkr_select_cb)
city_select.on_change('value', spkr_select_cb)
spkr_select_cb('', '', '')
source = ColumnDataSource(data=dict(seconds=[], ch0=[]))
channels = ['ch0']
playvisbtn = AudioButton(
label='Play visible signal', source=source, channels=channels,
width=120, disabled=True
)
playselbtn = AudioButton(
label='Play selected signal', source=source, channels=channels,
width=120, disabled=True
)
selmodebtn = RadioButtonGroup(labels=['200ms', 'word', 'phone'], active=1)
selmodebtn.on_change('active', selmode_cb)
# Instantiate and share specific select/zoom tools so that
# highlighting is synchronized on all plots.
boxsel = BoxSelectTool(dimensions='width')
spboxsel = BoxSelectTool(dimensions='width')
boxzoom = BoxZoomTool(dimensions='width')
zoomin = ZoomInTool(dimensions='width')
zoomout = ZoomOutTool(dimensions='width')
crosshair = CrosshairTool(dimensions='height')
shared_tools = [
'xpan', boxzoom, boxsel, crosshair, zoomin, zoomout, 'reset'
]
uttdiv = Div(text='')
figargs = dict(
tools=shared_tools,
)
cursor = Span(dimension='height', line_color='red', line_dash='dashed', line_width=1)
wavspec_height = 280
ch0 = figure(
name='ch0',
tooltips=[('time', '$x{0.0000}'), ('word', '@word'), ('phone', '@phone')],
height=wavspec_height,
**figargs
)
ch0.toolbar.logo = None
ch0.line(x='seconds', y='ch0', source=source, nonselection_line_alpha=0.6)
# Link pan, zoom events for plots with x_range.
ch0.x_range.on_change('start', x_range_cb)
ch0.x_range.on_change('end', x_range_cb)
ch0.on_event(SelectionGeometry, selection_cb)
ch0.on_event(Tap, cursor_cb)
ch0.add_layout(cursor)
wavtab = Panel(child=ch0, title='Waveform')
selbox = BoxAnnotation(
name='selbox',
left=None, right=None,
fill_color='green', fill_alpha=0.1,
line_color='green', line_width=1.5, line_dash='dashed',
visible=False
)
ch0.add_layout(selbox)
sgrams = [np.ones((1, 1))]
specsource = ColumnDataSource(data=dict(sgram0=[sgrams[0]]))
spec0 = figure(
name='spec0',
x_range=ch0.x_range, # Keep times synchronized
tooltips=[("time", "$x{0.0000}"), ("freq", "$y{0.0000}"), ("value", "@sgram0{0.000000}")],
height=wavspec_height,
**figargs
)
spec0.toolbar.logo = None
spec0.x_range.on_change('start', x_range_cb)
spec0.x_range.on_change('end', x_range_cb)
spec0.on_event(SelectionGeometry, selection_cb)
spec0.on_event(Tap, cursor_cb)
spec0.add_layout(cursor)
spec0.x_range.range_padding = spec0.y_range.range_padding = 0
spec0cmap = LogColorMapper(palette=r_Greys256, low_color=params['low_thresh_color'])
low_thresh_slider = Slider(
start=1.0, end=12.0, step=0.03125, value=params['low_thresh_power'],
title='Spectrogram threshold'
)
low_thresh_slider.on_change('value', low_thresh_cb)
spec0img = spec0.image(
image='sgram0',
x=0, y=0,
color_mapper=spec0cmap,
level='image',
source=specsource
)
spec0.grid.grid_line_width = 0.0
spec0.add_layout(selbox)
sgramtab = Panel(child=spec0, title='Spectrogram')
tngsource = ColumnDataSource(data={'x': [], 'y': []})
othsource = ColumnDataSource(data={'x': [], 'y': []})
timesource = ColumnDataSource({
'T1x': [], 'T1y': [], 'T2x': [], 'T2y': [],
'T3x': [], 'T3y': [], 'T4x': [], 'T4y': [],
'ULx': [], 'ULy': [], 'LLx': [], 'LLy': [],
'MIx': [], 'MIy': [], 'MMx': [], 'MMy': [],
'color': [], 'sec': []
})
lasttngtimesource = ColumnDataSource(data={'x': [], 'y': []})
lastothtimesource = ColumnDataSource(data={'x': [], 'y': []})
palsource = ColumnDataSource(pd.DataFrame({'x': [], 'y': []}))
phasource = ColumnDataSource( | pd.DataFrame({'x': [], 'y': []}) | pandas.DataFrame |
from superiq import VolumeData
from superiq.pipeline_utils import *
import boto3
import pandas as pd
from datetime import datetime
def collect_brain_age():
bucket = "mjff-ppmi"
version = "simple-v2"
prefix = f"superres-pipeline-{version}/"
objects = list_images(bucket, prefix)
brain_age = [i for i in objects if i.endswith('brain_age.csv')]
dfs = []
for i in brain_age:
ba = get_s3_object(bucket, i, '/tmp')
filename = ba.split('/')[-1]
splits = filename.split('-')
ba_df = pd.read_csv(ba)
ba_df['Repeat'] = splits[4]
dfs.append(ba_df)
dfs = pd.concat(dfs)
return dfs
if __name__ == "__main__":
bucket = "mjff-ppmi"
#metadata_key = "volume_measures/data_w_metadata_v01.csv"
version = "simple-v2"
prefix = f"superres-pipeline-{version}/"
stack_filename = f'ppmi_stacked_volumes_{version}.csv'
pivoted_filename = f'ppmi_pivoted_volumes_{version}.csv'
#merge_filename = f"dkt_with_metdata_{version}.csv"
upload_prefix = "volume_measures/"
vd = VolumeData(bucket, prefix, upload_prefix, cache=False)
local_stack = vd.stack_volumes(stack_filename)
local_pivot = vd.pivot_data(local_stack, pivoted_filename)
local_pivot_df = pd.read_csv(local_pivot)
ba = collect_brain_age()
local_pivot_df['join_date'] = [str(i)[:6] for i in local_pivot_df['Date']]
print(local_pivot_df.shape)
local_pivot_df = pd.merge(local_pivot_df, ba, on='Repeat')
print(local_pivot_df.shape)
s3 = boto3.client('s3')
local_pivot_df.to_csv('local_pivot.csv')
s3.upload_file(
'local_pivot.csv',
bucket,
upload_prefix + "simple_reg_sr_ppmi_volumes.csv"
)
metadata = 'metadata/PPMI_Original_Cohort_BL_to_Year_5_Dataset_Apr2020.csv'
prodro = 'metadata/PPMI_Prodromal_Cohort_BL_to_Year_1_Dataset_Apr2020.csv'
metadata_path = 'ppmi_metadata.csv'
prodro_path = 'ppmi_prodro.csv'
s3.download_file(bucket, metadata, metadata_path)
s3.download_file(bucket, prodro, prodro_path)
metadata_df = pd.read_csv(metadata_path)
prodro_df = pd.read_csv(prodro_path)
stack = | pd.concat([metadata_df, prodro_df]) | pandas.concat |
import pandas as pd
from pandas.tseries.offsets import DateOffset
import configparser
import fire
import os
import math
import numpy as np
import qlib
from qlib.data import D
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
from sklearn.metrics.pairwise import cosine_similarity
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.util import getLatestFile, getFolderNameInConfig
def analyzeHistoricalValue(ifUseNewIssues = True, ifUseOldIssues = True, ifUseWatchList = False, ifUseAdjustFactorToLatestDay = False, ifPrintFundCode = False):
'''
Args:
        ifUseNewIssues: whether to include funds whose available history is shorter than daysRangeToAnalyze
        ifUseOldIssues: whether to include funds whose available history covers the full daysRangeToAnalyze
        ifUseWatchList: whether to plot only the funds listed in config/watchlist.txt
        ifUseAdjustFactorToLatestDay: whether to apply the adjustFactorToLatestDay generated by trainGBDT.py
        ifPrintFundCode: whether to print fund codes on the plot; if so, the image will be larger
'''
print ("------------------------ Begin to analyze historical value... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
if ifUseAdjustFactorToLatestDay:
dfAdjustFactorToLatestDay = pd.read_csv(cf.get("Analyze", "pathOfDfAdjustFactorToLatestDay"), dtype={'Unnamed: 0':object})
# read watchlist
watchlist = []
for line in open("./config/watchlist.txt", "r"): # ['110011', '161028', '110020', '180003', '006479', '007994', '001015']
watchlist.append(line.split("\n")[0])
# we should ignore some strange funds
ignorelist = []
for line in open("./config/ignorelist.txt", "r"): # ['009317', '009763', '009764']
ignorelist.append(line.split("\n")[0])
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) # 2018-02-10 00:00:00
# exclude the influence of days without trading
calendarBetweenFirstDayAndLastDay = D.calendar(freq='day', start_time=firstDay, end_time=lastDay)
firstDayToAnalyze = calendarBetweenFirstDayAndLastDay[0]
lastDayToAnalyze = calendarBetweenFirstDayAndLastDay[-1]
daysRangeToAnalyze = (lastDayToAnalyze - firstDayToAnalyze).days # 1094
count = 0
riskListForOldIssues = []
returnListForOldIssues = []
fundCodeListForOldIssues = []
riskListForNewIssues = []
returnListForNewIssues = []
fundCodeListForNewIssues = []
instruments = D.instruments(market='all')
for file in D.list_instruments(instruments=instruments, as_list=True):
fundCode = file.split("_")[0] # 000001
# exclude some funds
if fundCode in ignorelist:
continue
if ifUseWatchList and fundCode not in watchlist:
continue
if count % 100 == 0:
print ("\ncount = %s\tfundCode = %s" % (count, fundCode)) # 180003
try:
# read file and remove empty line
df = D.features([file], [
'$AccumulativeNetAssetValue',
'($AccumulativeNetAssetValue - Ref($AccumulativeNetAssetValue, 1)) / Ref($AccumulativeNetAssetValue, 1)'
], start_time=firstDayToAnalyze, end_time=lastDayToAnalyze)
df.columns = [
'AccumulativeNetAssetValue',
'GrowthRatio'
]
#df = df.unstack(level=0)
df["datetime"] = df.index.levels[1]
            # abandon values recorded before the date when GrowthRatio is too large (abs >= 1.0)
df["AbsoluteGrowthRatio"] = df["GrowthRatio"].abs()
if df[df["AbsoluteGrowthRatio"] > 1].shape[0] > 0:
df = df.loc[0:df[df["AbsoluteGrowthRatio"] > 1].first_valid_index() - 1]
# reset the index
df = df.dropna(axis=0, subset=['datetime', 'GrowthRatio']).reset_index(drop=True)
# like http://fundf10.eastmoney.com/jjjz_010476.html, the return in 30 days is 26%, so the annualized return is too high
if df.shape[0] <= minDaysRange:
continue
# count the days between first day and last day
day = df['datetime']
            # TODO: how about fund 519858, which traded on 2018-01-28 (a Sunday)?
            firstDayInThisFund = day[day.first_valid_index()] # 2018-02-12 00:00:00, 2018-02-10 is Saturday
lastDayInThisFund = day[day.last_valid_index()] # 2021-02-10 00:00:00
daysRange = (lastDayInThisFund - firstDayInThisFund).days # 1094
# get the value in important days
earliestNetValue = df[df['datetime'] == firstDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 3.49
lastestNetValue = df[df['datetime'] == lastDayInThisFund]["AccumulativeNetAssetValue"].tolist()[0] # 4.046
            # standardize the risk in one year
            # assume the value is a list like (0, 1, 0, 1,...), so the growth ratio is a list like (1, -1, 1, -1,...)
            # set ddof to 0 to standardize the risk by n, not (n - 1); then the std is 1, not related to daysRange
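            # tiny worked example (hypothetical data): pd.Series([1, -1, 1, -1]).std(ddof=0) == 1.0
            # for any length of such an alternating series, while ddof=1 would give sqrt(n/(n-1)) > 1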
riskCurrent = df["GrowthRatio"].std(ddof=0)
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRange*daysRangeInOneYear
if not ifUseNewIssues:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
continue
else:
                # use the latest value to reflect the true percentage gain
                # this is worthwhile if the fund rises rapidly in recent days but showed little change over the long preceding period
if ifUseAdjustFactorToLatestDay:
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
# if the fund code locates in dfAdjustFactorToLatestDay, adjust the latest value and days range
adjustedFactor = dfAdjustFactorToLatestDay[fundCode]
adjustedFactor = adjustedFactor[adjustedFactor.first_valid_index()] # 0.987561058590916
lastestNetValue = lastestNetValue * adjustedFactor
returnCurrent = (lastestNetValue-earliestNetValue)/earliestNetValue/daysRangeToAnalyze*daysRangeInOneYear
# new issues
if (firstDayInThisFund - firstDayToAnalyze).days > 0:
riskListForNewIssues.append(riskCurrent)
returnListForNewIssues.append(returnCurrent)
fundCodeListForNewIssues.append(fundCode)
else:
riskListForOldIssues.append(riskCurrent)
returnListForOldIssues.append(returnCurrent)
fundCodeListForOldIssues.append(fundCode)
count += 1
except Exception as e:
print ("fundCode = %s\terror = %s" % (fundCode, e))
continue
if not ifUseWatchList and ifPrintFundCode:
plt.figure(figsize=(10, 10))
if ifUseOldIssues:
plt.scatter(riskListForOldIssues, returnListForOldIssues, c='k')
if ifUseNewIssues:
plt.scatter(riskListForNewIssues, returnListForNewIssues, c='k')
plt.xlabel("Risk")
plt.ylabel("Annualized return")
ax = plt.gca()
# no line in right and top border
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
if ifPrintFundCode:
if ifUseOldIssues:
for i in range(len(fundCodeListForOldIssues)):
x = riskListForOldIssues[i]
y = returnListForOldIssues[i]
fundCode = fundCodeListForOldIssues[i]
plt.text(x, y, fundCode, fontsize=10)
if ifUseNewIssues:
for i in range(len(fundCodeListForNewIssues)):
x = riskListForNewIssues[i]
y = returnListForNewIssues[i]
fundCode = fundCodeListForNewIssues[i]
plt.text(x, y, fundCode, fontsize=10)
nameOfPicture = "risk_return"
nameOfPicture = nameOfPicture + "_watchlist" if ifUseWatchList else nameOfPicture + "_noWatchlist"
nameOfPicture = nameOfPicture + "_useNewIssues" if ifUseNewIssues else nameOfPicture + "_notUseNewIssues"
nameOfPicture = nameOfPicture + "_useOldIssues" if ifUseOldIssues else nameOfPicture + "_notUseOldIssues"
nameOfPicture = nameOfPicture + "_useAdjustFactor" if ifUseAdjustFactorToLatestDay else nameOfPicture + "_notUseAdjustFactor"
plt.savefig("./image/%s.png" % nameOfPicture)
print ("------------------------ Done. ------------------------")
def getAverageSlopeForFundsInSameRange(ifUseAdjustFactorToLatestDay=True):
'''
    In the return-risk figure, return is roughly proportional to risk in most cases,
    so the slope (return/risk) can be used as a feature of each fund. To summarize
    funds in the same risk range, the average slope can represent that range.
'''
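    # Illustrative numbers (hypothetical): a fund with annualized return 0.20 and risk 0.01
    # has slope 20; averaging the slopes of all funds whose risk falls in the same bucket
    # yields one representative value per bucket.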
print ("------------------------ Begin to get average slope for funds in same range... ------------------------")
# read config file
cf = configparser.ConfigParser()
cf.read("config/config.ini")
# offset of days
numberOfYears = int(cf.get("Parameter", "numberOfYears"))
numberOfMonths = int(cf.get("Parameter", "numberOfMonths"))
numberOfDays = int(cf.get("Parameter", "numberOfDays"))
minDaysRange = int(cf.get("Parameter", "minDaysRange"))
daysRangeInOneYear = int(cf.get("Parameter", "daysRangeInOneYear"))
# qlib init
qlib.init(provider_uri='data/bin')
# use one fund be the standard of trading day
calendar = D.calendar(freq='day')
lastDay = calendar[-1] # 2021-02-10 00:00:00
firstDay = lastDay - | DateOffset(years=numberOfYears, months=numberOfMonths, days=numberOfDays) | pandas.tseries.offsets.DateOffset |
#!/usr/bin/env python3.6
import os
import statistics
import requests
import datetime
from typing import Dict, List, Tuple, Optional, Union, Iterable, Any
from collections import defaultdict
from shutil import copyfile
import pandas as pd
from urllib import parse
from dataclasses import dataclass, field
from enum import Enum
from runner import ClusterInfoLoader
import logging
FORMAT = "%(asctime)-15s:%(levelname)s %(module)s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class ExperimentType(Enum):
SingleWorkloadsRun = 'SingleWorkloadsRun'
SteppingSingleWorkloadsRun = 'SteppingSingleWorkloadsRun'
ThreeStageStandardRun = 'ThreeStageStandardRun'
WINDOW_LENGTH = 60 * 5
@dataclass
class ExperimentMeta:
data_path: str
title: str
description: str
params: Dict[str, str]
changelog: str
bugs: str
experiment_type: ExperimentType = ExperimentType.ThreeStageStandardRun
experiment_baseline_index: int = 0
commit_hash: str = 'unknown'
def data_path_(self):
return os.path.basename(self.data_path)
class PrometheusClient:
BASE_URL = "http://192.168.127.12:30900"
@staticmethod
def instant_query(query, time):
""" instant query
https://prometheus.io/docs/prometheus/latest/querying/api/#instant-vectors
Sample usage:
r = instant_query("avg_over_time(task_llc_occupancy_bytes
{app='redis-memtier-big', host='node37',
task_name='default/redis-memtier-big-0'}[3000s])",
1583395200)
"""
urli = PrometheusClient.BASE_URL + '/api/v1/query?{}'.format(parse.urlencode(dict(
query=query, time=time, )))
try:
r = requests.get(urli)
except requests.exceptions.ConnectionError as e:
            logging.error("Connection error with Prometheus. Error: {}".format(e.response))
return []
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
raise Exception(r.content) from e
j = r.json()
assert j['status'] != 'error'
assert j['data']['resultType'] == 'vector'
data = j['data']['result']
return data
@staticmethod
def convert_result_to_dict(result):
""" Very memory inefficient!"""
d = defaultdict(list)
for series in result:
metric = series['metric']
# instant query
if 'value' in series:
for label_name, label_value in metric.items():
d[label_name].append(label_value)
timestamp, value = series['value']
d['value'].append(value)
d['timestamp'].append(pd.Timestamp(timestamp, unit='s'))
# range query
elif 'values' in series:
for value in series['values']:
for label_name, label_value in metric.items():
d[label_name].append(label_value)
timestamp, value = value
d['value'].append(value)
d['timestamp'].append(pd.Timestamp(timestamp, unit='s'))
else:
raise Exception('unsupported result type! (only matrix and instant are supported!)')
d = dict(d)
return d
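    # Hypothetical shape of the returned dict for an instant query with two series:
    # {'__name__': [...], 'host': ['node37', 'node38'], 'value': ['1.0', '2.0'],
    #  'timestamp': [Timestamp(...), Timestamp(...)]}
    # i.e. one list entry per series, which converts directly via pd.DataFrame(d).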
@dataclass
class Stage:
def __init__(self, t_end: int):
SAFE_DELTA = 60 # 60 seconds back
t_end -= SAFE_DELTA
self.tasks: List[Task] = AnalyzerQueries.query_tasks_list(t_end)
AnalyzerQueries.query_task_performance_metrics(time=t_end, tasks=self.tasks)
self.nodes: Dict[str, Node] = AnalyzerQueries.query_nodes_list(t_end)
AnalyzerQueries.query_platform_performance_metrics(time=t_end, nodes=self.nodes)
@dataclass
class Task:
name: str
workload_name: str
node: str
performance_metrics: Dict[str, float] = field(default_factory=lambda: {})
def if_aep(self):
return self.node in ClusterInfoLoader.get_instance().get_aep_nodes()
def get_throughput(self, subvalue) -> Optional[float]:
if Metric.TASK_THROUGHPUT in self.performance_metrics:
return float(self.performance_metrics[Metric.TASK_THROUGHPUT][subvalue])
else:
return None
def get_latency(self, subvalue) -> Optional[float]:
if Metric.TASK_LATENCY in self.performance_metrics:
return float(self.performance_metrics[Metric.TASK_LATENCY][subvalue])
else:
return None
@dataclass
class Node:
name: str
performance_metrics: Dict[str, float] = field(default_factory=lambda: {})
def to_dict(self, nodes_capacities: Dict[str, Dict]) -> Dict:
# @TODO should be taken from queries
node_cpu = nodes_capacities[self.name]['cpu']
node_mem = nodes_capacities[self.name]['mem']
if Metric.PLATFORM_CPU_REQUESTED not in self.performance_metrics:
# means that no tasks were run on the node
return {}
return {
'name': self.name,
'cpu_requested': round(
float(self.performance_metrics[Metric.PLATFORM_CPU_REQUESTED]['instant']), 2),
'cpu_requested [%]': round(float(
self.performance_metrics[Metric.PLATFORM_CPU_REQUESTED][
'instant']) / node_cpu * 100, 2),
'cpu_util [experimental]': round(
float(self.performance_metrics[Metric.PLATFORM_CPU_UTIL]['instant']), 2),
'mem_requested': round(
float(self.performance_metrics[Metric.PLATFORM_MEM_USAGE]['instant']), 2),
'mem_requested [%]': round(float(
self.performance_metrics[Metric.PLATFORM_MEM_USAGE]['instant']) / node_mem * 100,
2),
'mbw_reads [GB]': round(
float(self.performance_metrics[Metric.PLATFORM_MBW_READS]['instant']), 2),
'mbw_writes [GB]': round(
float(self.performance_metrics[Metric.PLATFORM_MBW_WRITES]['instant']), 2),
'mbw_flat [GB]': round(3.7 * float(
self.performance_metrics[Metric.PLATFORM_MBW_WRITES]['instant']) + float(
self.performance_metrics[Metric.PLATFORM_MBW_READS]['instant']), 2),
'dram_hit_ratio [%]': round(
float(self.performance_metrics[Metric.PLATFORM_DRAM_HIT_RATIO]['instant']) * 100,
2),
'wss_used (aprox)': round(
float(self.performance_metrics[Metric.PLATFORM_WSS_USED]['instant']), 2),
'mem/cpu (requested)': round(
float(self.performance_metrics[Metric.PLATFORM_MEM_USAGE]['instant']) /
float(self.performance_metrics[Metric.PLATFORM_CPU_REQUESTED]['instant']), 2)
}
@staticmethod
def to_dataframe(nodes: List[Any], nodes_capacities: Dict[str, Dict]) -> pd.DataFrame:
return pd.DataFrame([node.to_dict(nodes_capacities) for node in nodes])
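    # Hedged usage sketch ('stage' is a hypothetical Stage instance):
    # nodes_df = Node.to_dataframe(list(stage.nodes.values()),
    #                              ClusterInfoLoader.get_instance().get_nodes())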
@dataclass
class Stat:
"""Statistics"""
avg: float
min: float
max: float
stdev: float
@dataclass
class WStat:
"""Workload Statistics"""
name: str
latency: Stat
throughput: Stat
count: int
def to_dict(self):
return {
"LB_min": round(self.latency.min, 2),
"LB_avg": round(self.latency.avg, 2),
"LB_max": round(self.latency.max, 2),
"L_stdev": round(self.latency.stdev, 2),
"L_stdev[%]": round(self.latency.stdev / self.latency.avg * 100, 2),
# ---
"TB_min": round(self.throughput.min, 2),
"TB_avg": round(self.throughput.avg, 2),
"TB_max": round(self.throughput.max, 2),
"T_stdev": round(self.throughput.stdev, 2),
"T_stdev[%]": round(self.throughput.stdev / self.throughput.avg * 100, 2),
# ---
"B_count": self.count,
"app": self.name
}
@staticmethod
def to_dataframe(wstats: List[Any]) -> pd.DataFrame:
return pd.DataFrame([wstat.to_dict() for wstat in wstats])
def calculate_task_summaries(tasks: List[Task], workloads_baseline: Dict[str, WStat]) -> List[
Dict[str, Union[float, str]]]:
"""
Calculate summary for each task defined in >>tasks<< as comparison to workloads_baseline.
@TODO what if there is no given task workload in workloads_baseline ?
"""
tasks_summaries = []
for task in tasks:
workload = task.workload_name
# avg of a task behaviour
throughput = task.get_throughput('avg')
latency = task.get_latency('avg')
if throughput is None or latency is None:
logging.debug('Ignoring task {} cause not available'.format(task))
continue
assert throughput is not None
assert latency is not None
task_summary = {
"L": latency,
"L[avg][{}s]".format(WINDOW_LENGTH): latency,
"L[q0.1][{}s]".format(WINDOW_LENGTH): task.get_latency('q0.1,'),
"L[q0.9][{}s]".format(WINDOW_LENGTH): task.get_latency('q0.9,'),
"L[stdev][{}s]".format(WINDOW_LENGTH): task.get_latency('stdev'),
"L[stdev][{}s][%]".format(WINDOW_LENGTH): -1 if task.get_latency(
'avg') == 0 else task.get_latency('stdev') / task.get_latency('avg') * 100,
# ----
"T": throughput,
"T[avg][{}s]".format(WINDOW_LENGTH): throughput,
"T[q0.9][{}s]".format(WINDOW_LENGTH): task.get_throughput('q0.9,'),
"T[q0.1][{}s]".format(WINDOW_LENGTH): task.get_throughput('q0.1,'),
"T[stdev][{}s]".format(WINDOW_LENGTH): task.get_throughput('stdev'),
"T[stdev][{}s][%]".format(WINDOW_LENGTH): -1 if task.get_throughput(
'avg') == 0 else task.get_throughput('stdev') / task.get_throughput('avg') * 100,
# ----
"L_nice[%]": latency / workloads_baseline[workload].latency.max * 100,
"T_nice[%]": throughput / workloads_baseline[workload].throughput.min * 100,
# # ----
"L_avg[%]": latency / workloads_baseline[workload].latency.avg * 100,
"T_avg[%]": throughput / workloads_baseline[workload].throughput.avg * 100,
# # ----
"L_strict[%]": latency / workloads_baseline[workload].latency.min * 100,
"T_strict[%]": throughput / workloads_baseline[workload].throughput.max * 100,
# ----
"task": task.name, "app": task.workload_name, "node": task.node
}
for key, val in task_summary.items():
if type(val) == float:
task_summary[key] = round(val, 3)
ts = task_summary
ts["pass_nice"] = ts['T_nice[%]'] > 80 and ts['L_nice[%]'] < 150
ts["pass_avg"] = ts['T_avg[%]'] > 80 and ts['L_avg[%]'] < 150
ts["pass_strict"] = ts['T_strict[%]'] > 80 and ts['L_strict[%]'] < 150
tasks_summaries.append(ts)
return tasks_summaries
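# Worked example of the pass criteria above (hypothetical numbers): a task at 85% of
# baseline throughput and 120% of baseline latency passes (T > 80 and L < 150), while
# a task at 70% throughput fails regardless of its latency ratio.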
class StagesAnalyzer:
def __init__(self, events, workloads):
self.events_data = (events, workloads)
assert len(self.events_data[0]) % 2 == 0
self.stages_count = int(len(self.events_data[0]) / 2)
# @Move to loader
        T_DELTA = float(os.environ.get('T_DELTA', 0))  # env values are strings, so cast before adding to a timestamp
self.stages = []
for i in range(self.stages_count):
self.stages.append(Stage(t_end=events[i * 2 + 1][0].timestamp() + T_DELTA))
def delete_report_files(self, report_root_dir):
if os.path.isdir(report_root_dir):
for file_ in os.listdir(report_root_dir):
os.remove(os.path.join(report_root_dir, file_))
def get_all_tasks_count_in_stage(self, stage: int) -> int:
"""Only returns tasks count, directly from metric."""
return sum(int(node.performance_metrics[Metric.POD_SCHEDULED]['instant'])
for node in self.stages[stage].nodes.values()
if Metric.POD_SCHEDULED in node.performance_metrics)
def get_all_workloads_in_stage(self, stage_index: int):
return set(task.workload_name for task in self.stages[stage_index].tasks.values())
def get_all_tasks_in_stage_on_nodes(self, stage_index: int, nodes: List[str]):
return [task for task in self.stages[stage_index].tasks.values() if task.node in nodes]
def get_all_nodes_in_stage(self, stage_index: int) -> List[str]:
return [nodename for nodename in self.stages[stage_index].nodes]
def calculate_per_workload_wstats_per_stage(self, workloads: Iterable[str],
stage_index: int, filter_nodes: List[str]) -> Dict[
str, WStat]:
"""
Calculate WStat for all workloads in list for stage (stage_index).
Takes data from all nodes.
"""
workloads_wstats: Dict[str, WStat] = {}
for workload in workloads:
# filter tasks of a given workload
tasks = [task for task in self.stages[stage_index].tasks.values() if
task.workload_name == workload]
# filter out tasks which were run on >>filter_nodes<<
tasks = [task for task in tasks if task.node not in filter_nodes]
# avg but from 12 sec for a single task
throughputs_list = [task.get_throughput('avg') for task in tasks if
task.get_throughput('avg') is not None]
latencies_list = [task.get_latency('avg') for task in tasks if
task.get_latency('avg') is not None]
if len(throughputs_list) == 0:
exception_value = float('inf')
t_max, t_min, t_avg, t_stdev = [exception_value] * 4
l_max, l_min, l_avg, l_stdev = [exception_value] * 4
elif len(throughputs_list) == 1:
t_max, t_min, t_avg, t_stdev = [throughputs_list[0], throughputs_list[0],
throughputs_list[0], 0]
l_max, l_min, l_avg, l_stdev = [throughputs_list[0], throughputs_list[0],
throughputs_list[0], 0]
else:
t_max, t_min, t_avg, t_stdev = max(throughputs_list), min(throughputs_list), \
statistics.mean(throughputs_list), statistics.stdev(
throughputs_list)
l_max, l_min, l_avg, l_stdev = max(latencies_list), min(latencies_list), \
statistics.mean(latencies_list), statistics.stdev(latencies_list)
workloads_wstats[workload] = WStat(latency=Stat(l_avg, l_min, l_max, l_stdev),
throughput=Stat(t_avg, t_min, t_max, t_stdev),
count=len(tasks), name=workload)
return workloads_wstats
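    # Hedged usage sketch ('analyzer' and 'aep_nodes' are illustrative names):
    # wstats = analyzer.calculate_per_workload_wstats_per_stage(
    #     workloads=analyzer.get_all_workloads_in_stage(0),
    #     stage_index=0, filter_nodes=aep_nodes)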
def get_stages_count(self):
return self.stages_count
def aep_report(self, experiment_meta: ExperimentMeta, experiment_index: int):
"""
Compare results from AEP to DRAM:
1) list all workloads which are run on AEP (Task.workload.name) in stage 3 (or 2)
            a) for all these workloads, read their performance on DRAM in stage 1
        2) for assertion and consistency, we could also check how results compare across all stages
        3) compare the results we got on AEP vs DRAM separately for stages 2 and 3
a) for each workload:
"""
# baseline results in stage0 on DRAM
for i in range(len(self.stages)):
check = self.get_all_tasks_count_in_stage(0)
assert check > 5
workloads_wstats: List[Dict[str, WStat]] = []
tasks_summaries__per_stage: List[List[Dict]] = []
node_summaries__per_stage: List[List[Dict]] = []
workloads_baseline: Dict[str, WStat] = None
aep_nodes = ClusterInfoLoader.get_instance().get_aep_nodes()
for stage_index in range(0, self.get_stages_count()):
workloads_wstat = self.calculate_per_workload_wstats_per_stage(
workloads=self.get_all_workloads_in_stage(stage_index), stage_index=stage_index,
filter_nodes=aep_nodes)
workloads_wstats.append(workloads_wstat)
# Only take nodes node10*
# @TODO replace with more generic solution, like param in MetaExperiment
nodes_to_filter = [node for node in
self.get_all_nodes_in_stage(experiment_meta.experiment_baseline_index)
if node in aep_nodes or not node.startswith('node10')]
workloads_baseline = self.calculate_per_workload_wstats_per_stage(
workloads=self.get_all_workloads_in_stage(experiment_meta.experiment_baseline_index),
stage_index=experiment_meta.experiment_baseline_index,
filter_nodes=nodes_to_filter)
for stage_index in range(0, self.get_stages_count()):
tasks = self.get_all_tasks_in_stage_on_nodes(stage_index=stage_index,
nodes=self.get_all_nodes_in_stage(
stage_index))
# ---
tasks_summaries = calculate_task_summaries(tasks, workloads_baseline)
tasks_summaries__per_stage.append(tasks_summaries)
# ---
nodes_capacities = ClusterInfoLoader.get_instance().get_nodes()
nodes_summaries = [self.stages[stage_index].nodes[node].to_dict(nodes_capacities) for
node in self.get_all_nodes_in_stage(stage_index)]
nodes_summaries = [s for s in nodes_summaries if list(s.keys())]
node_summaries__per_stage.append(nodes_summaries)
# Transform to DataFrames keeping the same names
workloads_wstats: List[pd.DataFrame] = [WStat.to_dataframe(el.values()) for el in
workloads_wstats]
tasks_summaries__per_stage: List[pd.DataFrame] = [pd.DataFrame(el) for el in
tasks_summaries__per_stage]
node_summaries__per_stage: List[pd.DataFrame] = [ | pd.DataFrame(el) | pandas.DataFrame |
import re
import pandas as pd
import numpy as np
from collections import Counter
from tqdm import tqdm
tqdm.pandas()
class TextPreprocessing:
"""
Clean and preprocess your text data
"""
@staticmethod
def text_case(df, columns, case='lower', verbose=True):
"""
        Perform string manipulation to convert text to:
1. Lower case
2. Upper case
3. Capitalize
Parameters
----------
df : DataFrame
The df to perform case operation on
        columns : list of str
            The columns on which the operation has to be performed
case : string, default 'lower'
Options: 'lower' , 'upper', 'capitalize'
Returns
-------
List
"""
assert isinstance(df, pd.DataFrame), "Pass a DataFrame"
for column in columns:
assert column in df.columns, f"The column: <{column}> is not present in the DataFrame, pass a valid column name"
__cleaned_data__df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from scipy.interpolate import interp1d
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# -------------------------------------------------------------------------------- 5.1 Approximation Demand and Supply
# ---------- Demand and Supply Functions ----------
def demand(p):
"""Vectorized Function to determine *demand*.
Args:
p (np.array): Price vector for demand.
Raises:
        TypeError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns demand quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
r = np.random.rand() * 2
n = abs(np.random.randn()) * 2
q = (
40 / (p + n)
+ 1 / (1 + np.exp(p - 75 + r))
+ 2 / (1 + np.exp(p - 50 + r))
+ 3 / (1 + np.exp(p - 25 + r))
)
q[q > 20] = np.nan
assert type(q) == type(p), "Type of output does not equal type of input!"
return q
def supply(p):
"""Vectorized Function to determine *supply.*
Args:
p (np.array): Price vector for supply.
Raises:
        TypeError: Argument p has to be an array.
AssertionError: Type of q and p has to be identical.
Returns:
np.array: Returns supply quantity.
"""
if not isinstance(p, np.ndarray):
raise TypeError("Price vector has to be an array!")
q = np.zeros(p.shape)
for i, c in enumerate(p):
if (c > 0) and (c < 10):
q[i] = 1.0
elif (c >= 10) and (c < 20):
q[i] = 1.5
elif (c >= 20) and (c < 25):
q[i] = 3.0
elif (c >= 25) and (c < 35):
q[i] = 3.6
elif (c >= 35) and (c < 45):
q[i] = 4.2
elif (c >= 45) and (c < 60):
q[i] = 5.0
elif (c >= 60) and (c < 75):
q[i] = 8.0
elif (c >= 75) and (c < 85):
q[i] = 12.0
elif (c >= 85) and (c < 90):
q[i] = 16.5
elif (c >= 90) and (c < 95):
q[i] = 18.5
elif c >= 95:
q[i] = 20.0
    assert type(q) == type(p), "Type of output does not equal type of input!"
return q
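# Minimal usage sketch (note that demand() draws random shift terms, so repeated calls differ):
# prices = np.linspace(1, 100, 200)
# qd, qs = demand(prices), supply(prices)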
# ---------- Approximation using scipy ----------
class PolynomialDS:
"""Object that approximates supply and demand functions using sicpy
interpolate method.
Args:
a (int): Lower bound of prices.
b (int): Upper bound of prices.
nodes (int): Interpolation nodes for demand and supply.
demand (function): Benchmark function supply.
supply (function): Benchmark function demand.
Raises:
AssertionError: Price must be non-negative.
AssertionError: By Assumption: price cannot exceed 100.
"""
def __init__(self, a, b, nodes, demand, supply):
"""Constructor method.
"""
self.a = a
self.b = b
assert a >= 0, "Price cannot be negative!"
assert (b > a) and (b <= 100), "By Assumption: Price cannot exceed 100!"
self.nodes = nodes
self.demand = demand
self.supply = supply
self.p = np.linspace(a, b, nodes)
self.qd = demand(self.p)
self.qs = supply(self.p)
def __len__(self):
"""Returns number of interpolation nodes.
Returns:
int: Number of known prices.
"""
return len(self.p)
def __repr__(self):
"""String representation of object.
"""
p = np.around(self.p, decimals=2)
qd = np.around(self.qd, decimals=2)
qs = np.around(self.qs, decimals=2)
return f"{len(self)} known values for Demand and Supply:\n\nPrices={p} \n\nDemand={qd} \nSupply={qs}"
def __call__(self, p):
"""Returns true and approximated value of demand and supply for a
given price.
Args:
p (np.array): Price vector.
Returns:
: Comparison.
"""
self.apprx_qd = interp1d(self.p, self.qd)
self.apprx_qs = interp1d(self.p, self.qs)
return f"-- Real value -- at price {p}: \n\nDemand = {self.demand(p)} \nSupply = {self.supply(p)} \n\n-- Approximated value -- at price {p}: \n\nDemand = {self.apprx_qd(p)} \nSupply = {self.apprx_qs(p)}"
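    # Design note: __call__ is what builds self.apprx_qd / self.apprx_qs, so it must be
    # invoked with some price array before plt_approx() or close_intersection().
    # Hypothetical order of use:
    # ds = PolynomialDS(0, 100, 25, demand, supply)
    # print(ds(np.array([50.0])))
    # ds.plt_approx()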
@staticmethod
def __name__():
"""Returns the name of the object.
"""
return "Demand and Supply Interpolator"
def plt_approx(self, fs=(14, 7), num1=16.1, num2=16.2, num3=16.3, num4=16.4):
"""Plots Approximation and true supply as well as demand.
Args:
fs (tuple, optional): Figuresize. Defaults to (14, 7).
num1 (float, optional): Number first figure. Defaults to 16.1.
num2 (float, optional): Number second figure. Defaults to 16.2.
num3 (float, optional): Number third figure. Defaults to 16.3.
num4 (float, optional): Number fourth figure. Defaults to 16.4.
"""
prices = np.linspace(self.a, self.b, self.nodes * 150)
apprx_qd = self.apprx_qd(prices)
apprx_qs = self.apprx_qs(prices)
qd = self.demand(prices)
qs = self.supply(prices)
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
ax1[0].plot(self.qd, self.p, "o", label="Nodes Demand", color="#4B045D")
ax1[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax1[0].plot(qd, prices, label="Real Demand", alpha=0.7, color="#D98D08")
ax1[0].set_title(f"Figure {num1}: Approximation of Demand")
ax1[0].legend(loc="center right")
ax1[0].grid()
ax1[1].plot(self.qs, self.p, "o", label="Nodes Supply", color="#4B045D")
ax1[1].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax1[1].plot(qs, prices, label="Real Supply", alpha=0.7, color="#67853E")
ax1[1].set_title(f"Figure {num2}: Approximation of Supply")
ax1[1].legend(loc="center right")
ax1[1].grid()
ax2[0].plot(
apprx_qd, prices, label="Interpolation Demand", ls="--", color="#8E0C08"
)
ax2[0].plot(
apprx_qs, prices, label="Interpolation Supply", ls="--", color="#0C5BCD"
)
ax2[0].set_title(f"Figure {num3}: Approximated Demand and Supply")
ax2[0].legend(loc="center right")
ax2[0].grid()
ax2[1].plot(qd, prices, label="Real Demand", color="#D98D08")
ax2[1].plot(qs, prices, label="Real Supply", color="#67853E")
ax2[1].set_title(f"Figure {num4}: True Demand and Supply")
ax2[1].legend(loc="center right")
ax2[1].grid()
plt.show()
abs_error_qd = np.array(abs(qd - apprx_qd))
abs_error_qd = abs_error_qd[~np.isnan(abs_error_qd)]
abs_error_qs = np.array(abs(qs - apprx_qs))
print(
f"Mean Absolute Error: \n\nDemand = {abs_error_qd.mean():.4f} \nSupply = {abs_error_qs.mean():.4f}"
)
def close_intersection(self, nodes=1000000):
"""Returns true and approximated market equilibrium.
Args:
nodes (int, optional): Number of interpolation nodes. Defaults to 1000000.
"""
prices = np.linspace(self.a, self.b, nodes)
f = lambda p: self.demand(p) - self.supply(p)
abs_sd = f(prices)
abs_sd = abs_sd[~np.isnan(abs_sd)]
argmin = abs(abs_sd).argmin()
pe = prices[argmin]
qe_demand = np.around(demand(np.array([pe])), decimals=3)
qe_supply = np.around(supply(np.array([pe])), decimals=3)
g = lambda p: self.apprx_qd(p) - self.apprx_qs(p)
abs_asd = f(prices)
abs_asd = abs_asd[~np.isnan(abs_asd)]
argmin_a = abs(abs_asd).argmin()
pea = prices[argmin_a]
aqe_demand = np.around(self.apprx_qd(np.array([pea])), decimals=3)
aqe_supply = np.around(self.apprx_qs(np.array([pea])), decimals=3)
print(
f"Equilibrium True (Quantity, Price) \n*** *** *** *** \nDemand: {(qe_demand[0], np.around(pe, decimals=3))} \nSupply: {(qe_supply[0], np.around(pe, decimals=3))}\n"
)
print(
f"Equilibrium Approximation (Quantity, Price) \n*** *** *** *** \nDemand: {(aqe_demand[0], np.around(pea, decimals=3))} \nSupply: {(aqe_supply[0], np.around(pea, decimals=3))}"
)
# ---------- Approximation using ML ----------
class AISupplyDemandApprox:
"""Object that approximates supply and demand using various ML methods.
Args:
nodes (int): Number of known nodes.
supply (function): Unknown supply function.
demand (function): Unknown demand function.
a (int, optional): Lower bound of prices. Defaults to 0.
b (int, optional): Upper bound of prices. Defaults to 100.
ts (float, optional): Size of testing data. Defaults to 0.4.
rs (int, optional): Random state. Defaults to 42.
Raises:
AssertionError: Price must be non-negative.
AssertionError: Training data includes nan values.
AssertionError: Testing data includes nan values.
"""
def __init__(self, nodes, supply, demand, a=0, b=100, ts=0.4, rs=42):
"""Constructor method.
"""
assert a >= 0, "Price must be Non Negative!"
p = np.linspace(a, b, nodes)
q = supply(p)
qd = demand(p)
p_train, p_test, q_train, q_test = train_test_split(
p, q, test_size=ts, random_state=rs
)
pd_train, pd_test, qd_train, qd_test = train_test_split(
p, qd, test_size=ts, random_state=rs
)
self.p_train = p_train.reshape(-1, 1) # reshape data
self.p_test = p_test.reshape(-1, 1) # reshape data
self.q_train = q_train.reshape(-1, 1) # reshape data
self.q_test = q_test.reshape(-1, 1) # reshape data
nan_ind = np.argwhere(np.isnan(qd_train)) # select index of nan values
qd_train_mod = np.delete(qd_train, nan_ind) # delete nan index value
pd_train_mod = np.delete(pd_train, nan_ind)
self.pd_train = pd_train_mod.reshape(-1, 1)
self.pd_test = pd_test.reshape(-1, 1)
self.qd_train = qd_train_mod.reshape(-1, 1)
self.qd_test = qd_test.reshape(-1, 1)
assert np.isnan(self.pd_train).all() == False, "There are nan Values!"
assert np.isnan(self.pd_test).all() == False, "There are nan Values!"
@staticmethod
def __name__():
"""Returns name of AISupplyDemandApprox object.
"""
return "Modern-ML Demand and Supply Interpolator"
def plots(
self,
colors=["teal", "yellowgreen", "gold"],
label=["Training Values", "Testing Values"] * 2,
markers=["x", "*", "v"],
n_neighbors=4,
degrees=[3, 6],
weight="distance",
fs=(15, 10),
num1=17.1,
num2=17.2,
num3=17.3,
num4=17.4,
):
"""Plots approximation results as well as training and testing data.
Args:
colors (list, optional): Colors of approximation results. Defaults
to ["teal", "yellowgreen", "gold"].
label (list, optional): Labels of training and testing data.
Defaults to ["Training Values", "Testing Values"]*2.
markers (list, optional): Markers of approximation. Defaults
to ["x", "*", "v"].
n_neighbors (int, optional): Number of k-nearest neighbors. Defaults to 4.
degrees (list, optional): Number of degrees for Linear Regression.
Defaults to [3, 6].
weight (str, optional): Weight of KNN Regression. Defaults to "distance".
fs (tuple, optional): Figuresize. Defaults to (15, 10)
num1 (float, optional): Number of first Figure. Defaults to 17.1.
num2 (float, optional): Number of second Figure. Defaults to 17.2.
num3 (float, optional): Number of third Figure. Defaults to 17.3.
num4 (float, optional): Number of fourth Figure. Defaults to 17.4.
Raises:
AssertionError: Length of degrees is out of range.
"""
self.degrees = degrees
assert len(degrees) == 2, "List out of range!"
qsup, psup = [self.q_train, self.q_test], [self.p_train, self.p_test]
qdem, pdem = [self.qd_train, self.qd_test], [self.pd_train, self.pd_test]
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=fs)
        # 'pdm' avoids shadowing the pandas alias 'pd'
        for i, (qs, ps, qd, pdm) in enumerate(zip(qsup, psup, qdem, pdem)):
            for ax in [ax1[0], ax1[1]]:
                ax.plot(qs, ps, "o", ms=4, label=label[i])
            for ax in [ax2[0], ax2[1]]:
                ax.plot(qd, pdm, "o", ms=4, label=label[i])
self.maes, self.maed = [], []
self.mses, self.msed = [], []
self.evss, self.evsd = [], []
self.r2s, self.r2d = [], []
for i, ax in enumerate([ax1, ax2]):
for j, d in enumerate(degrees):
model = make_pipeline(PolynomialFeatures(d), LinearRegression())
if i == 0:
model.fit(self.p_train, self.q_train)
pred = model.predict(self.p_test)
ax[i].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.p_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(
f"Figure {num1}: Linear Regression Approximation Supply"
)
ax[i].grid(True)
ax[i].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
model.fit(self.pd_train, self.qd_train)
pred = model.predict(self.pd_test)
ax[i - 1].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=5,
label=f"Approximation Degree {d}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i - 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i - 1].set_title(
f"Figure {num3}: Linear Regression Approximation Demand"
)
ax[i - 1].grid(True)
ax[i - 1].legend(loc="center right")
self.maed.append(mean_absolute_error(pred, self.qd_test))
self.msed.append(mean_squared_error(pred, self.qd_test))
self.evsd.append(explained_variance_score(pred, self.qd_test))
self.r2d.append(r2_score(pred, self.qd_test))
methods = ["KNN", "DecisionTree"]
knn = KNeighborsRegressor(n_neighbors, weights=weight)
tree = DecisionTreeRegressor()
for i, ax in enumerate([ax1, ax2]):
for j, m in enumerate([knn, tree]):
if i == 0:
m.fit(self.p_train, self.q_train)
pred = m.predict(self.p_test)
ax[i + 1].plot(
pred,
self.p_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i + 1].plot(
pred_ordered, ptest_ordered, color=colors[j], alpha=0.5
)
ax[i + 1].set_title(
f"Figure {num2}: KNN and DT Approximation Supply"
)
ax[i + 1].grid(True)
ax[i + 1].legend(loc="center right")
self.maes.append(mean_absolute_error(pred, self.q_test))
self.mses.append(mean_squared_error(pred, self.q_test))
self.evss.append(explained_variance_score(pred, self.q_test))
self.r2s.append(r2_score(pred, self.q_test))
elif i == 1:
m.fit(self.pd_train, self.qd_train)
pred = m.predict(self.pd_test)
ax[i].plot(
pred,
self.pd_test,
markers[j],
color=colors[j],
ms=4,
label=f"Approximation using {methods[j]}",
)
indexs_to_order_by = pred.ravel().argsort()
pred_ordered = pred[indexs_to_order_by]
ptest_ordered = self.pd_test.ravel()[indexs_to_order_by]
ax[i].plot(pred_ordered, ptest_ordered, color=colors[j], alpha=0.5)
ax[i].set_title(f"Figure {num4}: KNN and DT Approximation Demand")
ax[i].grid(True)
ax[i].legend(loc="center right")
                    self.maed.append(mean_absolute_error(self.qd_test, pred))
                    self.msed.append(mean_squared_error(self.qd_test, pred))
                    self.evsd.append(explained_variance_score(self.qd_test, pred))
                    self.r2d.append(r2_score(self.qd_test, pred))
plt.show()
def reslts_as_frame(self, num=14):
"""Returns accuracy of approximation using ML.
Args:
num (int, float, optional): Number of dataframe. Defaults to 14.
Returns:
pd.DataFrame: Accuracy of approximation.
"""
d1, d2 = self.degrees[0], self.degrees[1]
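        # Rows are indexed on three levels: market side, model family, and polynomial degree (blank for KNN/DTR).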
index_as_array_sup = [
np.array(["Supply"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
index_as_array_dem = [
np.array(["Demand"] * 4),
np.array(["Linear Regression"] * 2 + ["KNN Regression", "DTR"]),
np.array([f"{d1} Degrees", f"{d2} Degrees", "", ""]),
]
col = [
"Mean Absolute Error",
"Mean Squared Error",
"Explained Variance Score",
"$R^2$-Score",
]
data_supply = pd.concat(
[
pd.DataFrame(self.maes, index=index_as_array_sup),
pd.DataFrame(self.mses, index=index_as_array_sup),
pd.DataFrame(self.evss, index=index_as_array_sup),
pd.DataFrame(self.r2s, index=index_as_array_sup),
],
axis=1,
)
        data_demand = pd.concat(
            [
                pd.DataFrame(self.maed, index=index_as_array_dem),
                pd.DataFrame(self.msed, index=index_as_array_dem),
                pd.DataFrame(self.evsd, index=index_as_array_dem),
                pd.DataFrame(self.r2d, index=index_as_array_dem),
            ],
            axis=1,
        )
# Copyright (c) 2022 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import contextlib
import pathlib
import queue
import socket
import threading
import time
from collections import deque
from dataclasses import asdict, dataclass
from typing import Callable, Dict, Iterator, Tuple
import pandas as pd
from gabriel_lego import FrameResult, LEGOTask
from loguru import logger
from ..common import frame_stream_unpack, pack_response
from ... import data as e_data
@dataclass(frozen=True, eq=True)
class FrameRecord:
seq: int
received: float
received_monotonic: float
processed: float
processed_monotonic: float
processing_time: float
result: FrameResult
def to_dict(self) -> Dict[str, int | float | FrameResult]:
return asdict(self)
def server(
task_name: str,
sock: socket.SocketType,
result_cb: Callable[[FrameResult], None] = lambda _: None,
) -> pd.DataFrame:
logger.info(f"Starting LEGO task '{task_name}'")
records = deque()
task = LEGOTask(e_data.load_default_task(task_name))
with contextlib.closing(frame_stream_unpack(sock)) as frame_stream:
for seq, image_data in frame_stream:
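            # Record wall-clock and monotonic timestamps on arrival and after processing
            # so the per-frame processing latency can be derived.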
recv_time_mono = time.monotonic()
recv_time = time.time()
logger.info(f"Received frame with SEQ {seq}")
result = task.submit_frame(image_data)
proc_time_mono = time.monotonic()
proc_time = time.time()
logger.debug(f"Processing result: {result}")
result_cb(result)
if result == FrameResult.SUCCESS:
logger.success(
f"Frame with SEQ {seq} triggers advancement to next step"
)
transition = True
else:
transition = False
sock.sendall(
pack_response(
transition,
task.get_current_guide_illustration(),
task.get_current_instruction(),
)
)
# finally, store frame record
records.append(
FrameRecord(
seq=seq,
result=result,
received=recv_time,
received_monotonic=recv_time_mono,
processed=proc_time,
processed_monotonic=proc_time_mono,
processing_time=proc_time_mono - recv_time_mono,
)
)
# finally, return the recorded frames
    return pd.DataFrame(records)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
return 'feather_{}'.format(guid())
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
if columns is None or col.name in columns:
counts.append(col.null_count)
return counts
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
nthreads=1):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, nthreads=nthreads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
self.assertRaises(exc, f)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
        df = pd.DataFrame(data)
import urllib
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_cols(table):
header = table.find_all("th")
cols = []
for column in header:
try:
col = column.find("a").get_text()
except AttributeError:
col = column.get_text()
cols.append(col.strip())
if column.get("colspan"):
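            # A header cell spanning several columns contributes one extra, numbered column name per spanned column.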
for c in range(1, int(column.get("colspan"))):
cols.append("{} {}".format(col.strip(), c))
return cols
def get_data(years):
url = "https://www.thecompleteuniversityguide.co.uk/league-tables/rankings?v=wide&y="
data = []
for year in years:
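        # One request per (year, subject) pair; each league table is parsed row by row below.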
for index, subject in pd.read_csv("lookup.csv")["Subject"].iteritems():
r = requests.get(url + str(year) + "&s=" + urllib.parse.quote(subject))
soup = BeautifulSoup(r.text, "lxml")
table = soup.find("table", {"class": "league-table-table"})
table_cols = get_cols(table)
table_data = []
for row in table.find_all("tr"):
table_row = []
for cell in row.find_all("td"):
table_row.append(cell.get_text().strip())
table_data.append(table_row)
table_data = [l for l in table_data if len(l) > 2]
            table_data = pd.DataFrame(data=table_data, columns=table_cols)
'''
Plotter that collects all plotting functionality in one place.
If available, it uses the simple plotting functionality included in the different classes
and merges it to create more meaningful plots.
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from warnings import warn
#from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
#from .electric import align_two_meters
import matplotlib as mpl
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
from nilmtk import TimeFrameGroup, TimeFrame
import matplotlib.dates as mdates
#############################################################
#region Nilm Plotting
def plot_overall_power_vs_disaggregation(main_meter, disaggregations, verbose = False):
""" The plot for validating the NILM algorithm.
Plots the disaggregation below the overall powerflow together with
orientation lines.
Parameters
----------
    main_meter : nilmtk.Electric
        Electric with the overall power flow of the building.
    disaggregations : nilmtk.MeterGroup
        MeterGroup with all the disaggregated meters.
    verbose : bool
        Whether additional output is printed.
"""
# Create the main figure
fig = plt.figure() #, tight_layout=True)
# Create one bigger subplot for the overall power
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
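    # Restrict the plot to the last 48 hours of the available data.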
timeframe.start = timeframe.end - pd.Timedelta("48h")
ax = fig.add_subplot(4,1,1)
    if main_meter is not None:
main_meter.plot(ax, timeframe=timeframe, sample_period=2)
ax.set_xlim([timeframe.start, timeframe.end])
ax.set_xlabel('Time', fontsize=12)
ax.set_title('Disaggregation', fontsize=14)
#ax.set_ylabel('{0}'.format(i), fontsize=12)
# Create multiple smaller ones for the disaggregated flows
n = len(disaggregations.meters)
sections = math.ceil(n / 2 * 3)
size_main_figure = math.ceil(sections / 3)
for i, dis in enumerate(disaggregations.meters):
if verbose:
print(str(i) + "/" + str(n))
sub_ax = fig.add_subplot(sections, 1, size_main_figure+i+1)
dis.plot(sub_ax,timeframe=timeframe, legend = False, sample_period = 2)
ax.get_shared_x_axes().join(ax, sub_ax)
ax.get_shared_y_axes().join(ax, sub_ax)
sub_ax.set_ylim(ax.get_ylim())
if i != 2:
ax.set_ylabel("")
#sub_ax.set_xlim([timeframe.start, timeframe.end])
# Link the axis
plt.setp(ax.get_xticklabels(), visible=True)
#fig.subplots_adjust(hspace=0.0)
return fig
def plot_phases(building, interval = pd.Timedelta("1d"), verbose = False):
''' Simply plots all three phases to see the output.
This is equal to plotting the different sitemeters of the building.
Parameters
----------
building: nilmtk.building
The building for which the different phases are plottet.
interval: pd.Timedelta
The timedelta to plot.
verbose: bool
Whether to plot additional output.
'''
fig = plt.figure()
start = building.elec.sitemeters()[1].get_timeframe().start
new_timeframe = TimeFrameGroup([TimeFrame(start=start, end = start + interval)])
flows = []
for i in range(1,4):
if verbose:
print("Load {0}/{1}".format(i,3))
flows.append(building.elec.sitemeters()[i].power_series_all_data(sections=new_timeframe))
all = pd.concat(flows, axis = 1)
all.columns = ['Phase 1', 'Phase 2', 'Phase 3']
all.plot(colors=['r', 'g', 'b'], ax = fig.add_subplot(111))
return fig
def plot_stackplot(disaggregations, total_power = None, stacked = True, verbose = True):
""" Plots a stackplot, which stacks all disaggregation results on top of each other.
Parameters
----------
disaggregations: nilmtk.MeterGroup
Remember appliance 0 is the rest powerflow
    total_power : nilmtk.Electric (optional)
        Just for comparison, an additional plot with the whole power flow.
        Should be the same as all the disaggregated meters stacked together.
verbose: bool
Whether to print additional information
Returns
-------
fig: matplotlib.figure.Figure
        The newly created figure
"""
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
    timeframe.start = timeframe.end - pd.Timedelta("48h")
from collections import deque
from functools import lru_cache
import pandas as pd
import numpy as np
from pyrich.record import Record
from pyrich import stock
class Portfolio(Record):
currency_mapping = {
'CRYPTO': 'KRW',
'KOR': 'KRW',
'USA': 'USD',
}
def __init__(self, name: str, table: str) -> None:
super().__init__(table)
self.name = name
def _get_pivot_table(self, column: str, remove_na: bool=False) -> pd.DataFrame:
record_pivot_table = pd.pivot_table(
self.record,
values=column,
index=['country', 'symbol'],
columns='type',
aggfunc=np.sum
)
if remove_na:
record_pivot_table.fillna(0, inplace=True)
return record_pivot_table
@lru_cache
def _get_current_stock(self) -> pd.DataFrame:
stock = self._get_pivot_table('quantity', remove_na=True)
stock['quantity'] = stock['buy'] - stock['sell']
return stock
@lru_cache
def _get_trades(self) -> pd.DataFrame:
trades = self._get_pivot_table('total_price_paid')
return trades
def _get_average_price_paid(self, symbol: str) -> float:
symbol_transaction = self.record[self.record['symbol']==symbol]
symbol_transaction = symbol_transaction[['type', 'quantity', 'price']]
transactions = deque()
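        # Replay the trade history FIFO-style: every bought share pushes its price,
        # every sold share pops the oldest one, leaving only the currently held shares.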
for i in symbol_transaction.values:
transaction_type = i[0]
quantity = i[1]
price = i[2]
while quantity > 0:
if transaction_type == 'buy':
transactions.append(price)
else:
transactions.popleft()
quantity -= 1
transactions = np.array(transactions)
try:
average_price_paid = transactions.mean()
except Exception:
average_price_paid = 0
finally:
return average_price_paid
def _get_portfolio_average_price(self, portfolio: pd.DataFrame) -> pd.Series:
average_price_paid = {
symbol: self._get_average_price_paid(symbol)
for symbol
in portfolio.index
}
average_price_paid = pd.Series(
average_price_paid,
name='average_price_paid'
)
return average_price_paid
def _get_stock_quote(self, portfolio: pd.DataFrame) -> pd.DataFrame:
portfolio_stock_price = []
for symbol in portfolio.index:
country = portfolio.loc[symbol, 'country']
current_stock_data = stock.get_current_price(symbol, country)
portfolio_stock_price.append(current_stock_data)
day_change = pd.DataFrame(portfolio_stock_price)
day_change['dp'] = day_change['dp'].apply(round, args=(2,))
col_name = ['current_price', 'day_change(%)']
day_change.columns = col_name
day_change.index = portfolio.index
current_portfolio = portfolio.join(day_change)
return current_portfolio
def _get_gain(self, current_portfolio: pd.DataFrame) -> tuple:
price_data = current_portfolio[['current_value', 'invested_amount']]
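        # Absolute gain is current value minus invested amount; percentage gain is that
        # difference relative to the invested amount, rounded to two decimals.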
total_gain = price_data.agg(lambda x: x[0]-x[1], axis=1)
pct_gain = price_data.agg(lambda x: (x[0]-x[1])/x[1], axis=1)
pct_gain *= 100
pct_gain = round(pct_gain, 2)
return total_gain, pct_gain
def _get_current_stock_value(self, current_portfolio: pd.DataFrame) -> pd.Series:
investment = current_portfolio[['quantity', 'current_price']]
current_stock_value = investment.agg(np.prod, axis=1)
return current_stock_value
def current_portfolio(self) -> pd.DataFrame:
quantity = self._get_current_stock()
trades = self._get_trades()
transaction_summary = trades.join(quantity['quantity'])
currently_owned_stock = transaction_summary[transaction_summary['quantity'] > 0]
currently_owned_stock = currently_owned_stock.fillna(0)
currently_owned_stock['invested_amount'] = currently_owned_stock['buy'] - currently_owned_stock['sell']
currently_owned_stock.drop(['buy', 'sell'], axis=1, inplace=True)
portfolio = | pd.DataFrame(currently_owned_stock) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
'''Assignment 4 - Hypothesis Testing
This assignment requires more individual learning than previous assignments - you are encouraged to check
out the pandas documentation to find functions or methods you might not have used yet, or ask questions on
Stack Overflow and tag them as pandas and python related. And of course, the discussion forums are open for
interaction with your peers and the course staff.
Definitions:
A quarter is a specific three month period, Q1 is January through March, Q2 is April through June, Q3 is July
through September, Q4 is October through December.
A recession is defined as starting with two consecutive quarters of GDP decline, and ending with two
consecutive quarters of GDP growth.
A recession bottom is the quarter within a recession which had the lowest GDP.
A university town is a city which has a high percentage of university students compared to the total
population of the city.
Hypothesis: University towns have their mean housing prices less effected by recessions. Run a t-test to
compare the ratio of the mean price of houses in university towns the quarter before the recession starts
compared to the recession bottom. (price_ratio=quarter_before_recession/recession_bottom)
The following data files are available for this assignment:
From the Zillow research data site there is housing data for the United States. In particular the datafile
for all homes at a city level, City_Zhvi_AllHomes.csv, has median home sale prices at a fine grained level.
From the Wikipedia page on college towns is a list of university towns in the United States which has been
copy and pasted into the file university_towns.txt.
From Bureau of Economic Analysis, US Department of Commerce, the GDP over time of the United States in
current dollars (use the chained value in 2009 dollars), in quarterly intervals, in the file gdplev.xls.
For this assignment, only look at GDP data from the first quarter of 2000 onward.
Each function in this assignment below is worth 10%, with the exception of run_ttest(), which is worth 50%.'''
#-----------------------------------------------------------------------
'''Returns a DataFrame of towns and the states they are in from the
university_towns.txt list. The format of the DataFrame should be:
DataFrame( [ ["Michigan", "<NAME>"], ["Michigan", "Yipsilanti"] ],
columns=["State", "RegionName"] )
The following cleaning needs to be done:
1. For "State", removing characters from "[" to the end.
2. For "RegionName", when applicable, removing every character from " (" to the end.
3. Depending on how you read the data, you may need to remove newline character '\n'. '''
def get_list_of_university_towns():
import pandas as pd
import re
rute = 'university_towns.txt'
with open(rute) as file:
townslist = file.readlines()
townslist = [x.rstrip() for x in townslist]
townslist2 =[]
for i in townslist:
if '[edit]' in i:
state_string = re.sub(r" *\(.*\)| *\[.*\]","",i)
else:
region_string = re.sub(r" *\(.*\)| *\[.*\]","",i)
townslist2.append([state_string,region_string])
df = pd.DataFrame(townslist2, columns=['State','RegionName'])
return df
get_list_of_university_towns()
#---------- SHAPE 2 ----------
def get_list_of_university_towns():
State = []
RegionName = []
with open ('university_towns.txt', "r") as fileObj:
line = fileObj.readline().strip()
while line != '':
if line[-6:] == '[edit]':
st = line[:-6]
else:
State.append(st)
RegionName.append(line)
line = fileObj.readline().strip()
ut = pd.DataFrame(list(zip(State,RegionName)),columns=['State','RegionName'])
ut.RegionName.replace(r" \([^(]*|\([^(]*","", inplace=True,regex = True)
return ut
get_list_of_university_towns()
#---------- ANSWER ----------
'''
State RegionName
0 Alabama Auburn
1 Alabama Florence
2 Alabama Jacksonville
3 Alabama Livingston
4 Alabama Montevallo
.. ... ...
512 Wisconsin River Falls
513 Wisconsin Stevens Point
514 Wisconsin Waukesha
515 Wisconsin Whitewater
516 Wyoming Laramie
[517 rows x 2 columns]'''
#-----------------------------------------------------------------------
# Use this dictionary to map state names to two letter acronyms
states = {'OH': 'Ohio', 'KY': 'Kentucky', 'AS': 'American Samoa', 'NV': 'Nevada', 'WY': 'Wyoming',
'NA': 'National', 'AL': 'Alabama', 'MD': 'Maryland', 'AK': 'Alaska', 'UT': 'Utah', 'OR': 'Oregon',
'MT': 'Montana', 'IL': 'Illinois', 'TN': 'Tennessee', 'DC': 'District of Columbia', 'VT': 'Vermont',
'ID': 'Idaho', 'AR': 'Arkansas', 'ME': 'Maine', 'WA': 'Washington', 'HI': 'Hawaii', 'WI': 'Wisconsin',
'MI': 'Michigan', 'IN': 'Indiana', 'NJ': 'New Jersey', 'AZ': 'Arizona', 'GU': 'Guam', 'MS': 'Mississippi',
'PR': 'Puerto Rico', 'NC': 'North Carolina', 'TX': 'Texas', 'SD': 'South Dakota',
'MP': 'Northern Mariana Islands', 'IA': 'Iowa', 'MO': 'Missouri', 'CT': 'Connecticut',
'WV': 'West Virginia', 'SC': 'South Carolina', 'LA': 'Louisiana', 'KS': 'Kansas', 'NY': 'New York',
'NE': 'Nebraska', 'OK': 'Oklahoma', 'FL': 'Florida', 'CA': 'California', 'CO': 'Colorado',
'PA': 'Pennsylvania', 'DE': 'Delaware', 'NM': 'New Mexico', 'RI': 'Rhode Island', 'MN': 'Minnesota',
'VI': 'Virgin Islands', 'NH': 'New Hampshire', 'MA': 'Massachusetts', 'GA': 'Georgia', 'ND': 'North Dakota',
'VA': 'Virginia'}
#-----------------------------------------------------------------------
'''Returns the year and quarter of the recession start time as a string value in a format such as 2005q3'''
def get_recession_start():
import pandas as pd
gdp= pd.read_excel(
'gdplev.xls',
skiprows= 219)
gdp = gdp[['1999q4', 12323.3]]
gdp = gdp.rename(columns={'1999q4':'Quarter', 12323.3:'GDP in billions'})
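    # Look for two consecutive quarters of declining GDP, the definition of a recession start used in this assignment.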
for i in range(0,gdp.shape[0]-1):
if (gdp.iloc[i-2][1]> gdp.iloc[i-1][1]) and (gdp.iloc[i-1][1]> gdp.iloc[i][1]):
startdate = gdp.iloc[i-3][0]
return startdate
get_recession_start()
#---------- ANSWER ----------
'''2008q3'''
#-----------------------------------------------------------------------
'''Returns the year and quarter of the recession end time as a string value in a format such as 2005q3'''
def get_recession_end():
import pandas as pd
    gdplev = pd.ExcelFile('gdplev.xls')
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
def test_compact_ints_use_unsigned(self):
# see gh-13323
data = 'a,b,c\n1,9,258'
# sanity check
expected = DataFrame({
'a': np.array([1], dtype=np.int64),
'b': np.array([9], dtype=np.int64),
'c': np.array([258], dtype=np.int64),
})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.int8),
'b': np.array([9], dtype=np.int8),
'c': np.array([258], dtype=np.int16),
})
# default behaviour for 'use_unsigned'
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True)
tm.assert_frame_equal(out, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=False)
tm.assert_frame_equal(out, expected)
expected = DataFrame({
'a': np.array([1], dtype=np.uint8),
'b': np.array([9], dtype=np.uint8),
'c': np.array([258], dtype=np.uint16),
})
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
out = self.read_csv(StringIO(data), compact_ints=True,
use_unsigned=True)
tm.assert_frame_equal(out, expected)
def test_compact_ints_as_recarray(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
## Produce College Rankings
## Based on Earnings Outcomes
## Load Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
from scipy import stats
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.linear_model import Lasso
## Function to standardize a given column
def std_score(x):
return((x - x.mean()) / x.std())
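# Illustrative sanity check (doctest-style sketch; the toy Series below is not part of
# the original pipeline): a standardized column should have mean ~0 and std ~1.
# >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
# >>> std_score(s).mean()  # ~0.0
# >>> std_score(s).std()   # ~1.0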
## Initialize random number generator
# So that kfold produces the same cuts
np.random.seed(2001)
## Import clean data set from transform_college.py
df = pd.read_csv('college_cleaned.csv')
info_cols = ['score_sat_act', 'pell_grant_pct', 'born_in_usa_pct',
'female_pct', 'region_high_inc', 'urban_area', 'enrollment',
'black_or_hispanic_pct', 'overage23', 'median_hh_income',
'avg_sat_2011', 'median_act_2011', 'avg_net_price_2011',
'avg_net_price_lowinc_2011', 'admission_rate_2011',
'completion_rate_6yr_2011']
## OLS Linear Model
id_vars = ['id', 'school_name', 'school_city', 'school_state',
'region', 'public']
y_var = 'log_earnings_10yr'
discrete_vars = ['region_high_inc', 'urban_area']
# features for model
cont_vars = ['score_sat_act', 'female_pct', 'born_in_usa_pct',
'pell_grant_pct', 'enrollment', 'overage23']
# available features
cont_vars_lasso = ['score_sat_act', 'female_pct', 'born_in_usa_pct',
'pell_grant_pct', 'enrollment', 'overage23', 'median_hh_income',
'black_or_hispanic_pct', 'pct_college_degree']
## Columns needed for lasso regression
dflasso = df[id_vars + [y_var] + cont_vars_lasso + discrete_vars].copy()
## Standardize continuous columns
for var in cont_vars_lasso:
dflasso[var] = std_score(dflasso[var])
## Columns needed for model
dfmod = dflasso[id_vars + [y_var] + cont_vars + discrete_vars].copy()
plt.hist(dfmod[y_var], 50)
plt.savefig('./plots/y_var_hist.png')
## Features and dependent variable
y_lasso = dflasso[y_var]
X_lasso = dflasso.drop(id_vars + [y_var], 1)
y = dfmod[y_var]
X = dfmod.drop(id_vars + [y_var], 1)
## Check for multicollinearity
vif = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
n = 0
print('variance inflation factors')
for x in X.columns:
print(x, vif[n].round(3))
n += 1
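# Interpretation note (common rule of thumb, not a result from this analysis): VIF values
# above roughly 5-10 are usually read as problematic multicollinearity. A high-VIF feature
# could then be dropped before refitting, e.g. (hypothetical column choice):
# >>> X_reduced = X.drop(columns=['born_in_usa_pct'])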
## Lasso Regression
def lasso(y_col, x_cols, alphas):
'''
Takes in a list of alphas. Outputs a dataframe containing the coefficients of lasso regressions from each alpha.
Adapted from <NAME> http://chrisalbon.com/machine-learning/lasso_regression_in_scikit.html
'''
# Create an empty data frame
df = pd.DataFrame()
import pandas as pd
from pandas.api.types import is_scalar as pd_is_scalar
from dask.array import Array
from dask.dataframe.core import Series
from dask.delayed import delayed
from dask.utils import derived_from
__all__ = ("to_numeric",)
@derived_from(pd, ua_args=["downcast"])
def to_numeric(arg, errors="raise", meta=None):
"""
Return type depends on input. Delayed if scalar, otherwise same as input.
For errors, only "raise" and "coerce" are allowed.
"""
if errors not in ("raise", "coerce"):
raise ValueError("invalid error value specified")
is_series = isinstance(arg, Series)
is_array = isinstance(arg, Array)
is_scalar = pd_is_scalar(arg)
if not any([is_series, is_array, is_scalar]):
raise TypeError(
"arg must be a list, tuple, dask.array.Array, or dask.dataframe.Series"
)
if meta is not None:
if is_scalar:
raise KeyError("``meta`` is not allowed when input is a scalar.")
else:
if is_series or is_array:
meta = pd.to_numeric(arg._meta)
# Author: <NAME>, PhD
#
# Email: <EMAIL>
#
# Organization: National Center for Advancing Translational Sciences (NCATS/NIH)
#
# References
#
# Ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.aggregate.html
# Ref: https://stackoverflow.com/questions/27298178/concatenate-strings-from-several-rows-using-pandas-groupby
# Ref: https://stackoverflow.com/questions/32117848/pandas-groupby-concatenate-strings-in-multiple-columns
# Ref; https://stackoverflow.com/questions/60260774/pandas-agg-dropping-columns-lambda-function
import pandas as pd
import sys
def extract_metadata (df, md_cols):
orig_cols = {}
length = df.shape[0]
first = True
md = []
if md_cols != 'na':
md_cols = md_cols.split (';')
for col in md_cols:
col = col.strip()
orig_cols[col] = list (map(str, df[col]))
if first:
first = False
for i in range(length):
combined_md = ''
for col in orig_cols:
md_val = orig_cols [col][i]
if md_val == 'nan':
md_val = ''
combined_md += col + ':' + md_val + ';'
md.append (combined_md[:-1])
else:
md = ['na' for i in range (length)]
return (md)
def merge_metadata (data_type, df, md):
if data_type == 'host_protein':
host_proteins = list(df['host_protein'])
activations = list(df['activation'])
activation_types = list(df['activation_type'])
df = pd.DataFrame({'host_protein': host_proteins, 'activation': activations, 'activation_type': activation_types, 'metadata': md})
import sys
import pandas as pd
import numpy as np
import h5py
import os
import time
import pickle
import multiprocessing as mp
from os import listdir
from os.path import isfile, join, splitext, dirname, abspath
from joblib import Parallel, delayed
from datetime import datetime
from dataset_paths import (
get_balanced_h5_path,
get_postprocess_folder_general,
get_full_csr_adj,
get_balanced_csr_adj,
get_tree_csr_adj,
get_raw_dataset_csv_path,
get_common_config_details,
print_timing_output,
)
from TimerManager import TimerManager
sys.path.append("..")
def get_balanced_file_list(config_obj, local_override=False):
BALANCED_DIR = get_balanced_h5_path(config_obj, local_override)
balanced_file_list = [
f
for f in listdir(BALANCED_DIR)
if isfile(join(BALANCED_DIR, f))
if f != ".DS_Store"
]
balanced_file_list.sort()
#print("Tree list Length: ", len(balanced_file_list))
return balanced_file_list
def create_if_not_exists(directory_to_possibly_create):
if not os.path.exists(directory_to_possibly_create):
os.makedirs(directory_to_possibly_create)
def get_file_tag(config_obj):
(
dataset,
data_subset_type,
matrix_name,
num_trees,
tree_type,
parallelism,
) = get_common_config_details(config_obj)
file_tag = num_trees + tree_type + "_" + parallelism
if config_obj["has_labels"]:
if config_obj["weighted_status"]:
file_tag = file_tag + "_weighted_outcomes_"
elif config_obj["tiebreak_node"] != "None":
file_tag = (
file_tag
+ "_unweighted_tiebreakNode"
+ str(config_obj["tiebreak_node"])
+ "_outcomes_"
)
else:
file_tag = file_tag + "_unweighted_outcomes_"
else:
if config_obj["weighted_status"]:
file_tag = file_tag + "_weighted_no_outcomes_"
elif config_obj["tiebreak_node"] != "None":
file_tag = (
file_tag
+ "_unweighted_tiebreakNode"
+ str(config_obj["tiebreak_node"])
+ "_no_outcomes_"
)
else:
file_tag = file_tag + "_unweighted_no_outcomes_"
return file_tag
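# Illustrative example (hypothetical config values, not taken from any real run):
# num_trees="100", tree_type="bfs", parallelism="parallel", has_labels=True and
# weighted_status=True would produce the tag "100bfs_parallel_weighted_outcomes_".
# >>> get_file_tag(config_obj)  # -> "100bfs_parallel_weighted_outcomes_"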
########################################################################
### Calculate vertex dfs
########################################################################
def postprocess_vertex_df(config_obj, dont_remake_override=False):
(
dataset,
data_subset_type,
matrix_name,
num_trees,
tree_type,
parallelism,
) = get_common_config_details(config_obj)
output_folder = get_postprocess_folder_general(config_obj)
output_type = config_obj["machine"]
file_tag = get_file_tag(config_obj)
FULL_DF_PATH = output_folder + file_tag + "_vertex_df.pkl"
vertex_df = None
#print("-------- Entering Post-Process Vertex DF --------")
if (
not isfile(FULL_DF_PATH) or config_obj["postprocess"]
) and not dont_remake_override:
#print("-------- Creating Vertex DF --------")
# if (it's not a file or we want to remake it) AND we're not calling this function from a plotting fn, table fn, etc...
to_be_df_dict = None
trees_list = get_balanced_file_list(config_obj, True)
if parallelism == "parallel" or parallelism == "spark":
num_cores = mp.cpu_count()
#print(
# "Creating vertex df (parallel: ",
# num_cores,
# " cores):",
# dataset,
# ", ",
# data_subset_type,
# ", ",
# matrix_name,
# ") ",
#)
vertex_df_start = datetime.now()
df_tup_list = Parallel(n_jobs=num_cores)(
delayed(get_per_tree_vertex_dict_tup)(tree, config_obj)
for tree in trees_list
)
to_be_df_dict = {tup[0]: tup[1] for tup in df_tup_list}
#print("Finished base, adding component and outcome columns.")
elif parallelism == "serial":
#print(
# "Creating vertex df (serial):",
# dataset,
# ", ",
# data_subset_type,
# ", ",
# matrix_name,
# ") ",
#)
vertex_df_start = datetime.now()
to_be_df_dict = {
tree: get_per_tree_vertex_dict(tree, config_obj) for tree in trees_list
}
#print("Finished base, adding component and outcome columns.")
vertex_df = create_vertex_df_from_vertex_dict(
to_be_df_dict, config_obj, trees_list, FULL_DF_PATH
)
print_timing_output(
"VERTEX_DF_TIME: (hh:mm:ss.ms)",
datetime.now() - vertex_df_start,
output_type,
)
else:
#print("-------- Reading Vertex DF --------")
vertex_df = pd.read_pickle(FULL_DF_PATH)
return vertex_df
def get_users_map(config_obj):
map_csv_path = get_raw_dataset_csv_path(config_obj) + "_map.csv"
if os.path.isfile(map_csv_path):
users_map_df = pd.read_csv(map_csv_path)
ignore = False
else:
#print("No file in map_csv path. Ignore this if your users do not have any mapping.")
users_map_df = None
ignore = True
return users_map_df, ignore
def create_vertex_df_from_vertex_dict(
to_be_df_dict, config_obj, trees_list, FULL_DF_PATH
):
#print("Creating components dict.")
try:
components_dict = {
tree: to_be_df_dict[tree]["component_list"] for tree in trees_list
}
component_df = pd.DataFrame(components_dict)
import sys
import numpy as np
import pandas as pd
from natsort import natsorted
from pyranges.statistics import StatisticsMethods
from pyranges.genomicfeatures import GenomicFeaturesMethods
from pyranges import PyRanges
from pyranges.helpers import single_value_key, get_key_from_df
def set_dtypes(df, int64):
# if extended is None:
# extended = False if df.Start.dtype == np.int32 else True
if not int64:
dtypes = {
"Start": np.int32,
"End": np.int32,
"Chromosome": "category",
"Strand": "category",
}
else:
dtypes = {
"Start": np.int64,
"End": np.int64,
"Chromosome": "category",
"Strand": "category",
}
if "Strand" not in df:
del dtypes["Strand"]
# need to ascertain that object columns do not consist of multiple types
# https://github.com/biocore-ntnu/epic2/issues/32
for column in "Chromosome Strand".split():
if column not in df:
continue
df[column] = df[column].astype(str)
for col, dtype in dtypes.items():
if df[col].dtype.name != dtype:
df[col] = df[col].astype(dtype)
return df
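# Minimal sketch (toy interval frame, not from the test suite): with int64=False the
# coordinate columns are downcast to int32 and Chromosome/Strand become categoricals.
# >>> df = pd.DataFrame({"Chromosome": ["chr1"], "Start": [0], "End": [10], "Strand": ["+"]})
# >>> set_dtypes(df, int64=False).dtypes  # Start/End -> int32, Chromosome/Strand -> category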
def create_df_dict(df, stranded):
chrs = df.Chromosome.cat.remove_unused_categories()
df["Chromosome"] = chrs
if stranded:
grpby_key = "Chromosome Strand".split()
df["Strand"] = df.Strand.cat.remove_unused_categories()
else:
grpby_key = "Chromosome"
return {k: v for k, v in df.groupby(grpby_key)}
def create_pyranges_df(chromosomes, starts, ends, strands=None):
if isinstance(chromosomes, str) or isinstance(chromosomes, int):
chromosomes = pd.Series([chromosomes] * len(starts), dtype="category")
if strands is not None:
if isinstance(strands, str):
strands = pd.Series([strands] * len(starts), dtype="category")
columns = [chromosomes, starts, ends, strands]
lengths = list(str(len(s)) for s in columns)
assert (
len(set(lengths)) == 1
), "chromosomes, starts, ends and strands must be of equal length. But are {}".format(
", ".join(lengths)
)
colnames = "Chromosome Start End Strand".split()
else:
columns = [chromosomes, starts, ends]
lengths = list(str(len(s)) for s in columns)
assert (
len(set(lengths)) == 1
), "chromosomes, starts and ends must be of equal length. But are {}".format(
", ".join(lengths)
)
colnames = "Chromosome Start End".split()
idx = range(len(starts))
series_to_concat = []
for s in columns:
if isinstance(s, pd.Series):
s = pd.Series(s.values, index=idx)
else:
s = pd.Series(s, index=idx)
################################################################################
# Module: archetypal.template
# Description:
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import collections
import logging as lg
import numpy as np
import pandas as pd
from deprecation import deprecated
from sigfig import round
import archetypal
from archetypal import log, settings, timeit, top, weighted_mean
from archetypal.template import UmiBase, UmiSchedule, UniqueName
def resolve_temp(temp, idf):
"""Resolve the temperature. If a float is passed, simply return it. If a str
is passed, get the schedule and return the mean value.
Args:
temp (float or str):
idf (IDF): the idf object
"""
if isinstance(temp, float):
return temp
elif isinstance(temp, str):
sched = UmiSchedule(Name=temp, idf=idf)
return sched.all_values.mean()
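# Usage sketch (assumes an already-loaded `idf` model; the schedule name "AlwaysOn21C"
# is hypothetical): floats pass through unchanged, schedule names resolve to the mean
# of their hourly values.
# >>> resolve_temp(21.0, idf)           # -> 21.0
# >>> resolve_temp("AlwaysOn21C", idf)  # -> mean of that schedule's hourly values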
class VentilationSetting(UmiBase):
"""Zone Ventilation Settings
.. image:: ../images/template/zoneinfo-ventilation.png
"""
def __init__(
self,
NatVentSchedule=None,
ScheduledVentilationSchedule=None,
Afn=False,
Infiltration=0.1,
IsBuoyancyOn=True,
IsInfiltrationOn=True,
IsNatVentOn=False,
IsScheduledVentilationOn=False,
IsWindOn=False,
NatVentMaxOutdoorAirTemp=30,
NatVentMaxRelHumidity=90,
NatVentMinOutdoorAirTemp=0,
NatVentZoneTempSetpoint=18,
ScheduledVentilationAch=0.6,
ScheduledVentilationSetpoint=18,
**kwargs
):
"""Initialize a new VentilationSetting (for zone) object
Args:
NatVentSchedule (UmiSchedule): The name of the schedule
(Day | Week | Year) which ultimately modifies the Opening Area
value (see previous field). In its current implementation, any
schedule value greater than 0 is treated as "on". The schedule
values must be any positive number between 0 and 1, as a
fraction.
ScheduledVentilationSchedule (UmiSchedule): The name of
the schedule (Schedules Tab) that modifies the maximum design
volume flow rate. This fraction is between 0.0 and 1.0.
Afn (bool):
Infiltration (float): Infiltration rate in ACH
IsBuoyancyOn (bool): If True, simulation takes into account the
stack effect in the infiltration calculation
IsInfiltrationOn (bool): If yes, there is heat transfer between the
building and the outside caused by infiltration
IsNatVentOn (bool): If True, Natural ventilation (air
movement/exchange as a result of openings in the building façade
not consuming any fan energy).
IsScheduledVentilationOn (bool): If True, Ventilation (flow of air
from the outdoor environment directly into a thermal zone) is ON
IsWindOn (bool): If True, simulation takes into account the wind
effect in the infiltration calculation
NatVentMaxOutdoorAirTemp (float): The outdoor temperature (in
Celsius) above which ventilation is shut off. The minimum value
for this field is -100.0°C and the maximum value is 100.0°C. The
default value is 100.0°C if the field is left blank. This upper
temperature limit is intended to avoid overheating a space,
which could result in a cooling load.
NatVentMaxRelHumidity (float): Defines the dehumidifying relative
humidity setpoint, expressed as a percentage (0-100), for each
timestep of the simulation.
NatVentMinOutdoorAirTemp (float): The outdoor temperature (in
Celsius) below which ventilation is shut off. The minimum value
for this field is -100.0°C and the maximum value is 100.0°C. The
default value is -100.0°C if the field is left blank. This lower
temperature limit is intended to avoid overcooling a space,
which could result in a heating load.
NatVentZoneTempSetpoint (float):
ScheduledVentilationAch (float): This factor, along with the Zone
Volume, will be used to determine the Design Flow Rate.
ScheduledVentilationSetpoint (float): The indoor temperature (in
Celsius) below which ventilation is shutoff. The minimum value
for this field is -100.0°C and the maximum value is 100.0°C. The
default value is -100.0°C if the field is left blank. This lower
temperature limit is intended to avoid overcooling a space and
thus result in a heating load. For example, if the user
specifies a minimum temperature of 20°C, ventilation is assumed
to be available if the zone air temperature is above 20°C. If
the zone air temperature drops below 20°C, then ventilation is
automatically turned off.
**kwargs:
"""
super(VentilationSetting, self).__init__(**kwargs)
self.Afn = Afn
self.Infiltration = Infiltration
self.IsBuoyancyOn = IsBuoyancyOn
self.IsInfiltrationOn = IsInfiltrationOn
self.IsNatVentOn = IsNatVentOn
self.IsScheduledVentilationOn = IsScheduledVentilationOn
self.IsWindOn = IsWindOn
self.NatVentMaxOutdoorAirTemp = NatVentMaxOutdoorAirTemp
self.NatVentMaxRelHumidity = NatVentMaxRelHumidity
self.NatVentMinOutdoorAirTemp = NatVentMinOutdoorAirTemp
self.NatVentZoneTempSetpoint = NatVentZoneTempSetpoint
self.ScheduledVentilationAch = ScheduledVentilationAch
self.ScheduledVentilationSetpoint = ScheduledVentilationSetpoint
self.ScheduledVentilationSchedule = ScheduledVentilationSchedule
self.NatVentSchedule = NatVentSchedule
self._belongs_to_zone = kwargs.get("zone", None)
@property
def Infiltration(self):
return float(self._Infiltration)
@Infiltration.setter
def Infiltration(self, value):
self._Infiltration = value
@property
def NatVentMaxOutdoorAirTemp(self):
return float(self._NatVentMaxOutdoorAirTemp)
@NatVentMaxOutdoorAirTemp.setter
def NatVentMaxOutdoorAirTemp(self, value):
self._NatVentMaxOutdoorAirTemp = value
@property
def NatVentMaxRelHumidity(self):
return float(self._NatVentMaxRelHumidity)
@NatVentMaxRelHumidity.setter
def NatVentMaxRelHumidity(self, value):
self._NatVentMaxRelHumidity = value
@property
def NatVentMinOutdoorAirTemp(self):
return float(self._NatVentMinOutdoorAirTemp)
@NatVentMinOutdoorAirTemp.setter
def NatVentMinOutdoorAirTemp(self, value):
self._NatVentMinOutdoorAirTemp = value
@property
def NatVentZoneTempSetpoint(self):
return float(self._NatVentZoneTempSetpoint)
@NatVentZoneTempSetpoint.setter
def NatVentZoneTempSetpoint(self, value):
self._NatVentZoneTempSetpoint = value
@property
def ScheduledVentilationAch(self):
return float(self._ScheduledVentilationAch)
@ScheduledVentilationAch.setter
def ScheduledVentilationAch(self, value):
self._ScheduledVentilationAch = value
@property
def ScheduledVentilationSetpoint(self):
return float(self._ScheduledVentilationSetpoint)
@ScheduledVentilationSetpoint.setter
def ScheduledVentilationSetpoint(self, value):
self._ScheduledVentilationSetpoint = value
def __add__(self, other):
return self.combine(other)
def __hash__(self):
return hash(
(self.__class__.__name__, getattr(self, "Name", None), self.DataSource)
)
def __eq__(self, other):
if not isinstance(other, VentilationSetting):
return False
else:
return all(
[
self.NatVentSchedule == other.NatVentSchedule,
self.ScheduledVentilationSchedule
== self.ScheduledVentilationSchedule,
self.Afn == other.Afn,
self.Infiltration == other.Infiltration,
self.IsBuoyancyOn == other.IsBuoyancyOn,
self.IsInfiltrationOn == other.IsInfiltrationOn,
self.IsNatVentOn == other.IsNatVentOn,
self.IsScheduledVentilationOn == other.IsScheduledVentilationOn,
self.IsWindOn == other.IsWindOn,
self.NatVentMaxOutdoorAirTemp == other.NatVentMaxOutdoorAirTemp,
self.NatVentMaxRelHumidity == other.NatVentMaxRelHumidity,
self.NatVentMinOutdoorAirTemp == other.NatVentMinOutdoorAirTemp,
self.NatVentZoneTempSetpoint == other.NatVentZoneTempSetpoint,
self.ScheduledVentilationAch == other.ScheduledVentilationAch,
self.ScheduledVentilationSetpoint
== other.ScheduledVentilationSetpoint,
]
)
@classmethod
@deprecated(
deprecated_in="1.3.1",
removed_in="1.5",
current_version=archetypal.__version__,
details="Use from_dict function instead",
)
def from_json(cls, *args, **kwargs):
return cls.from_dict(*args, **kwargs)
@classmethod
def from_dict(cls, *args, **kwargs):
"""
Args:
*args:
**kwargs:
"""
vs = cls(*args, **kwargs)
vent_sch = kwargs.get("ScheduledVentilationSchedule", None)
vs.ScheduledVentilationSchedule = vs.get_ref(vent_sch)
nat_sch = kwargs.get("NatVentSchedule", None)
vs.NatVentSchedule = vs.get_ref(nat_sch)
return vs
def to_json(self):
"""Convert class properties to dict"""
self.validate() # Validate object before trying to get json format
data_dict = collections.OrderedDict()
data_dict["$id"] = str(self.id)
data_dict["Afn"] = self.Afn
data_dict["IsBuoyancyOn"] = self.IsBuoyancyOn
data_dict["Infiltration"] = round(self.Infiltration, 3)
data_dict["IsInfiltrationOn"] = self.IsInfiltrationOn
data_dict["IsNatVentOn"] = self.IsNatVentOn
data_dict["IsScheduledVentilationOn"] = self.IsScheduledVentilationOn
data_dict["NatVentMaxRelHumidity"] = round(self.NatVentMaxRelHumidity, 3)
data_dict["NatVentMaxOutdoorAirTemp"] = round(self.NatVentMaxOutdoorAirTemp, 3)
data_dict["NatVentMinOutdoorAirTemp"] = round(self.NatVentMinOutdoorAirTemp, 3)
data_dict["NatVentSchedule"] = self.NatVentSchedule.to_dict()
data_dict["NatVentZoneTempSetpoint"] = round(self.NatVentZoneTempSetpoint, 3)
data_dict["ScheduledVentilationAch"] = round(self.ScheduledVentilationAch, 3)
data_dict[
"ScheduledVentilationSchedule"
] = self.ScheduledVentilationSchedule.to_dict()
data_dict["ScheduledVentilationSetpoint"] = round(
self.ScheduledVentilationSetpoint, 3
)
data_dict["IsWindOn"] = self.IsWindOn
data_dict["Category"] = self.Category
data_dict["Comments"] = self.Comments
data_dict["DataSource"] = self.DataSource
data_dict["Name"] = UniqueName(self.Name)
return data_dict
@classmethod
@timeit
def from_zone(cls, zone, **kwargs):
"""
Args:
zone (template.zone.Zone): zone to gets information from
"""
# If Zone is not part of Conditioned Area, it should not have a
# VentilationSetting object.
if not zone.is_part_of_total_floor_area:
return None
name = zone.Name + "_VentilationSetting"
df = {"a": zone.idf.sql()}
ni_df = nominal_infiltration(df)
sched_df = nominal_mech_ventilation(df)
nat_df = nominal_nat_ventilation(df)
index = ("a", zone.Name.upper())
# Do infiltration
Infiltration, IsInfiltrationOn = do_infiltration(index, ni_df, zone)
# Do natural ventilation
(
IsNatVentOn,
IsWindOn,
IsBuoyancyOn,
NatVentMaxOutdoorAirTemp,
NatVentMaxRelHumidity,
NatVentMinOutdoorAirTemp,
NatVentSchedule,
NatVentZoneTempSetpoint,
) = do_natural_ventilation(index, nat_df, zone)
# Do scheduled ventilation
(
ScheduledVentilationSchedule,
IsScheduledVentilationOn,
ScheduledVentilationAch,
ScheduledVentilationSetpoint,
) = do_scheduled_ventilation(index, sched_df, zone)
z_vent = cls(
Name=name,
zone=zone,
Infiltration=Infiltration,
IsInfiltrationOn=IsInfiltrationOn,
IsWindOn=IsWindOn,
IsBuoyancyOn=IsBuoyancyOn,
IsNatVentOn=IsNatVentOn,
NatVentSchedule=NatVentSchedule,
NatVentMaxRelHumidity=NatVentMaxRelHumidity,
NatVentMaxOutdoorAirTemp=NatVentMaxOutdoorAirTemp,
NatVentMinOutdoorAirTemp=NatVentMinOutdoorAirTemp,
NatVentZoneTempSetpoint=NatVentZoneTempSetpoint,
ScheduledVentilationSchedule=ScheduledVentilationSchedule,
IsScheduledVentilationOn=IsScheduledVentilationOn,
ScheduledVentilationAch=ScheduledVentilationAch,
ScheduledVentilationSetpoint=ScheduledVentilationSetpoint,
idf=zone.idf,
Category=zone.idf.name,
**kwargs
)
return z_vent
def combine(self, other, weights=None):
"""Combine two VentilationSetting objects together.
Args:
other (VentilationSetting):
weights (list-like, optional): A list-like object of len 2. If None,
the volume of the zones for which self and other belongs is
used.
Returns:
(VentilationSetting): the combined VentilationSetting object.
"""
# Check if other is None. Simply return self
if not other:
return self
if not self:
return other
# Check if other is the same type as self
if not isinstance(other, self.__class__):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
# Check if other is not the same as self
if self == other:
return self
meta = self._get_predecessors_meta(other)
if not weights:
zone_weight = settings.zone_weight
weights = [
getattr(self._belongs_to_zone, str(zone_weight)),
getattr(other._belongs_to_zone, str(zone_weight)),
]
log(
'using zone {} "{}" as weighting factor in "{}" '
"combine.".format(
zone_weight,
" & ".join(list(map(str, map(int, weights)))),
self.__class__.__name__,
)
)
a = UmiSchedule.combine(self.NatVentSchedule, other.NatVentSchedule, weights)
b = UmiSchedule.combine(
self.ScheduledVentilationSchedule,
other.ScheduledVentilationSchedule,
weights,
)
c = any((self.Afn, other.Afn))
d = self._float_mean(other, "Infiltration", weights)
e = any((self.IsBuoyancyOn, other.IsBuoyancyOn))
f = any((self.IsInfiltrationOn, other.IsInfiltrationOn))
g = any((self.IsNatVentOn, other.IsNatVentOn))
h = any((self.IsScheduledVentilationOn, other.IsScheduledVentilationOn))
i = any((self.IsWindOn, other.IsWindOn))
j = self._float_mean(other, "NatVentMaxOutdoorAirTemp", weights)
k = self._float_mean(other, "NatVentMaxRelHumidity", weights)
l = self._float_mean(other, "NatVentMinOutdoorAirTemp", weights)
m = self._float_mean(other, "NatVentZoneTempSetpoint", weights)
n = self._float_mean(other, "ScheduledVentilationAch", weights)
o = self._float_mean(other, "ScheduledVentilationSetpoint", weights)
new_attr = dict(
NatVentSchedule=a,
ScheduledVentilationSchedule=b,
Afn=c,
Infiltration=d,
IsBuoyancyOn=e,
IsInfiltrationOn=f,
IsNatVentOn=g,
IsScheduledVentilationOn=h,
IsWindOn=i,
NatVentMaxOutdoorAirTemp=j,
NatVentMaxRelHumidity=k,
NatVentMinOutdoorAirTemp=l,
NatVentZoneTempSetpoint=m,
ScheduledVentilationAch=n,
ScheduledVentilationSetpoint=o,
)
# create a new object with the previous attributes
new_obj = self.__class__(**meta, **new_attr, idf=self.idf)
new_obj.predecessors.update(self.predecessors + other.predecessors)
return new_obj
def validate(self):
"""Validates UmiObjects and fills in missing values"""
if not self.NatVentSchedule:
self.NatVentSchedule = UmiSchedule.constant_schedule(
hourly_value=0, Name="AlwaysOff", allow_duplicates=True
)
if not self.ScheduledVentilationSchedule:
self.ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
hourly_value=0, Name="AlwaysOff", allow_duplicates=True
)
return self
def mapping(self):
self.validate()
return dict(
Afn=self.Afn,
IsBuoyancyOn=self.IsBuoyancyOn,
Infiltration=self.Infiltration,
IsInfiltrationOn=self.IsInfiltrationOn,
IsNatVentOn=self.IsNatVentOn,
IsScheduledVentilationOn=self.IsScheduledVentilationOn,
NatVentMaxRelHumidity=self.NatVentMaxRelHumidity,
NatVentMaxOutdoorAirTemp=self.NatVentMaxOutdoorAirTemp,
NatVentMinOutdoorAirTemp=self.NatVentMinOutdoorAirTemp,
NatVentSchedule=self.NatVentSchedule,
NatVentZoneTempSetpoint=self.NatVentZoneTempSetpoint,
ScheduledVentilationAch=self.ScheduledVentilationAch,
ScheduledVentilationSchedule=self.ScheduledVentilationSchedule,
ScheduledVentilationSetpoint=self.ScheduledVentilationSetpoint,
IsWindOn=self.IsWindOn,
Category=self.Category,
Comments=self.Comments,
DataSource=self.DataSource,
Name=self.Name,
)
def get_ref(self, ref):
"""Gets item matching ref id
Args:
ref:
"""
return next(
iter(
[
value
for value in VentilationSetting.CREATED_OBJECTS
if value.id == ref["$ref"]
]
),
None,
)
def do_infiltration(index, inf_df, zone):
"""Gets infiltration information of the zone
Args:
index (tuple): Zone name
inf_df (dataframe): Dataframe with infiltration information for each
zone
zone (template.zone.Zone): zone to gets information from
"""
if not inf_df.empty:
try:
Infiltration = inf_df.loc[index, "ACH - Air Changes per Hour"]
IsInfiltrationOn = any(inf_df.loc[index, "Name"])
except:
Infiltration = 0
IsInfiltrationOn = False
else:
Infiltration = 0
IsInfiltrationOn = False
return Infiltration, IsInfiltrationOn
def do_natural_ventilation(index, nat_df, zone):
"""Gets natural ventilation information of the zone
Args:
index (tuple): Zone name
nat_df:
zone (template.zone.Zone): zone to gets information from
"""
if not nat_df.empty:
try:
IsNatVentOn = any(nat_df.loc[index, "Name"])
schedule_name_ = nat_df.loc[index, "Schedule Name"]
quantity = nat_df.loc[index, "Volume Flow Rate/Floor Area {m3/s/m2}"]
NatVentSchedule = UmiSchedule(
Name=schedule_name_, idf=zone.idf, quantity=quantity
)
except KeyError:
# todo: For some reason, a ZoneVentilation:WindandStackOpenArea
# 'Opening Area Fraction Schedule Name' is read as Constant-0.0
# in the nat_df. For the mean time, a zone containing such an
# object will be turned on with an AlwaysOn schedule.
IsNatVentOn = True
NatVentSchedule = UmiSchedule.constant_schedule(
idf=zone.idf, allow_duplicates=True, quantity=np.nan
)
except Exception:
IsNatVentOn = False
NatVentSchedule = UmiSchedule.constant_schedule(
idf=zone.idf, allow_duplicates=True
)
finally:
try:
NatVentMaxRelHumidity = 90 # todo: not sure if it is being used
NatVentMaxOutdoorAirTemp = resolve_temp(
nat_df.loc[index, "Maximum Outdoor Temperature{C}/Schedule"],
zone.idf,
)
NatVentMinOutdoorAirTemp = resolve_temp(
nat_df.loc[index, "Minimum Outdoor Temperature{C}/Schedule"],
zone.idf,
)
NatVentZoneTempSetpoint = resolve_temp(
nat_df.loc[index, "Minimum Indoor Temperature{C}/Schedule"],
zone.idf,
)
except KeyError:
# this zone is not in the nat_df. Revert to defaults.
NatVentMaxRelHumidity = 90
NatVentMaxOutdoorAirTemp = 30
NatVentMinOutdoorAirTemp = 0
NatVentZoneTempSetpoint = 18
else:
IsNatVentOn = False
NatVentSchedule = UmiSchedule.constant_schedule(
idf=zone.idf, allow_duplicates=True
)
NatVentMaxRelHumidity = 90
NatVentMaxOutdoorAirTemp = 30
NatVentMinOutdoorAirTemp = 0
NatVentZoneTempSetpoint = 18
# Is Wind ON
if not zone.idf.idfobjects["ZoneVentilation:WindandStackOpenArea".upper()].list1:
IsWindOn = False
IsBuoyancyOn = False
else:
IsWindOn = True
IsBuoyancyOn = True
return (
IsNatVentOn,
IsWindOn,
IsBuoyancyOn,
NatVentMaxOutdoorAirTemp,
NatVentMaxRelHumidity,
NatVentMinOutdoorAirTemp,
NatVentSchedule,
NatVentZoneTempSetpoint,
)
def do_scheduled_ventilation(index, scd_df, zone):
"""Gets schedule ventilation information of the zone
Args:
index (tuple): Zone name
scd_df:
zone (template.zone.Zone): zone to gets information from
"""
if not scd_df.empty:
try:
IsScheduledVentilationOn = any(scd_df.loc[index, "Name"])
schedule_name_ = scd_df.loc[index, "Schedule Name"]
ScheduledVentilationSchedule = UmiSchedule(
Name=schedule_name_, idf=zone.idf
)
ScheduledVentilationAch = scd_df.loc[index, "ACH - Air Changes per Hour"]
ScheduledVentilationSetpoint = resolve_temp(
scd_df.loc[index, "Minimum " "Indoor " "Temperature{" "C}/Schedule"],
zone.idf,
)
except:
ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
hourly_value=0, Name="AlwaysOff", idf=zone.idf, allow_duplicates=True
)
IsScheduledVentilationOn = False
ScheduledVentilationAch = 0
ScheduledVentilationSetpoint = 18
else:
ScheduledVentilationSchedule = UmiSchedule.constant_schedule(
hourly_value=0, Name="AlwaysOff", idf=zone.idf, allow_duplicates=True
)
IsScheduledVentilationOn = False
ScheduledVentilationAch = 0
ScheduledVentilationSetpoint = 18
return (
ScheduledVentilationSchedule,
IsScheduledVentilationOn,
ScheduledVentilationAch,
ScheduledVentilationSetpoint,
)
def nominal_nat_ventilation(df):
_nom_vent = nominal_ventilation(df)
if _nom_vent.empty:
return _nom_vent
nom_natvent = (
_nom_vent.reset_index()
.set_index(["Archetype", "Zone Name"])
.loc[
lambda e: e["Fan Type {Exhaust;Intake;Natural}"].str.contains("Natural"), :
]
if not _nom_vent.empty
else None
)
return nom_natvent
def nominal_mech_ventilation(df):
_nom_vent = nominal_ventilation(df)
if _nom_vent.empty:
return _nom_vent
nom_vent = (
_nom_vent.reset_index()
.set_index(["Archetype", "Zone Name"])
.loc[
lambda e: ~e["Fan Type {Exhaust;Intake;Natural}"].str.contains("Natural"), :
]
if not _nom_vent.empty
else None
)
return nom_vent
def nominal_infiltration(df):
"""Nominal Infiltration
Args:
df:
Returns:
df
References:
* `Nominal Infiltration Table \
<https://bigladdersoftware.com/epx/docs/8-9/output-details-and \
-examples/eplusout-sql.html#nominalinfiltration-table>`_
"""
df = get_from_tabulardata(df)
report_name = "Initialization Summary"
table_name = "ZoneInfiltration Airflow Stats Nominal"
tbstr = df[
(df.ReportName == report_name) & (df.TableName == table_name)
].reset_index()
if tbstr.empty:
log(
"Table {} does not exist. "
"Returning an empty DataFrame".format(table_name),
lg.WARNING,
)
return pd.DataFrame([])
tbpiv = tbstr.pivot_table(
index=["Archetype", "RowName"],
columns="ColumnName",
values="Value",
aggfunc=lambda x: " ".join(x),
)
tbpiv.replace({"N/A": np.nan}, inplace=True)
return (
tbpiv.reset_index()
.groupby(["Archetype", "Zone Name"])
.agg(lambda x: pd.to_numeric(x, errors="ignore").sum())
)
def nominal_ventilation(df):
"""Nominal Ventilation
Args:
df:
Returns:
df
References:
* `Nominal Ventilation Table \
<https://bigladdersoftware.com/epx/docs/8-9/output-details-and \
-examples/eplusout-sql.html#nominalventilation-table>`_
"""
df = get_from_tabulardata(df)
report_name = "Initialization Summary"
table_name = "ZoneVentilation Airflow Stats Nominal"
tbstr = df[
(df.ReportName == report_name) & (df.TableName == table_name)
].reset_index()
if tbstr.empty:
log(
"Table {} does not exist. "
"Returning an empty DataFrame".format(table_name),
lg.WARNING,
)
return pd.DataFrame([])
tbpiv = tbstr.pivot_table(
index=["Archetype", "RowName"],
columns="ColumnName",
values="Value",
aggfunc=lambda x: " ".join(x),
)
tbpiv = tbpiv.replace({"N/A": np.nan}).apply(
lambda x: pd.to_numeric(x, errors="ignore")
)
import json
from datetime import datetime as dt
from datetime import timedelta
from numpy import busday_count
from os import makedirs
from os.path import isdir
from os.path import join
# from os.path import isfile
from os import listdir
import pandas as pd
from tws_futures.helpers import project
def save_as_json(data, file_path):
with open(file_path, 'w') as f:
f.write(json.dumps(data, indent=1, sort_keys=True))
def load_json(file_path):
with open(file_path, 'r') as f:
return json.loads(f.read())
def cache(data, location):
_meta = data['meta_data']
end_date = _meta['end_date']
storage_dir = join(location, end_date)
make_dirs(storage_dir)
file_name = join(storage_dir, f'{_meta["symbol"]}_{_meta["expiry"]}.json')
save_as_json(data, file_name)
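# Usage sketch (hypothetical payload and cache root): `cache` writes one JSON file per
# contract under <location>/<end_date>/, named "<symbol>_<expiry>.json".
# >>> payload = {'meta_data': {'end_date': '20210531', 'symbol': 'ES', 'expiry': '202106'},
# ...            'bar_data': []}
# >>> cache(payload, '/tmp/tws_cache')  # -> /tmp/tws_cache/20210531/ES_202106.json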
def make_datetime(target_date, date_format='%Y%m%d'):
return dt.strptime(target_date, date_format)
def find_date(target_date, number_of_days):
assert isinstance(target_date, dt), f'Target date must be a datetime object'
return target_date - timedelta(days=number_of_days)
def get_weekmask(weekends=False):
return [1, 1, 1, 1, 1, 1, 1] if weekends else [1, 1, 1, 1, 1, 0, 0]
def get_holidays(year='2021'):
return [f'{year}-01-01']
def get_business_days(start, end, weekends=False):
return int(busday_count(start.date(), end.date(),
weekmask=get_weekmask(weekends),
holidays=[f'{start.year}']))
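# Worked example (dates chosen for illustration only): business days are counted with
# the end date excluded, so a Monday-to-the-next-Monday span covers one working week.
# >>> get_business_days(make_datetime('20210104'), make_datetime('20210111'))  # -> 5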
def make_dirs(path):
if not(isdir(path)):
makedirs(path)
def setup(end_date):
# TODO: parameterise data storage location
# TODO: localise month map
year, month = end_date[:4], int(end_date[4:6])
target_dir = join(project.HISTORICAL_DATA_STORAGE, year, project.MONTH_MAP[month])
make_dirs(target_dir)
return target_dir
def generate_csv(end_date, location):
"""
Reads cached data and converts that into CSV
:param end_date: target date for which the data is to be converted (YYYYMMDD)
:param location: location from where data is cached
"""
columns = ['symbol', 'expiry', 'end_date', 'end_time', 'open', 'high',
'low', 'close', 'average', 'volume', 'bar_count']
read_from = join(location, end_date)
files = list(filter(lambda x: x.endswith('json'), listdir(read_from)))
for file in files:
file_path = join(read_from, file)
temp = load_json(file_path)
_bars, _meta = temp['bar_data'], temp['meta_data']
temp = pd.DataFrame(_bars)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import h5py
import numpy as np
import pandas as pd
import torch
from core.config import get_model_name
from core.evaluate import accuracy
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images, save_batch_fusion_heatmaps, save_debug_heatmaps
logger = logging.getLogger(__name__)
# def routing(raw_features, aggre_features, is_aggre, meta):
# if not is_aggre:
# return raw_features
#
# output = []
# for r, a, m in zip(raw_features, aggre_features, meta):
# view = torch.zeros_like(a)
# batch_size = a.size(0)
# for i in range(batch_size):
# s = m['source'][i]
# view[i] = a[i] if s != 'mpii' else r[i] # make it compatible with dataset rather than only h36m
# output.append(view)
# return output
def merge_first_two_dims(tensor):
dim0 = tensor.shape[0]
dim1 = tensor.shape[1]
left = tensor.shape[2:]
return tensor.view(dim0*dim1, *left)
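# Shape illustration (toy tensor, not from the training loop): per-sample camera views
# are folded into the batch dimension before the heatmaps are evaluated.
# >>> t = torch.zeros(2, 4, 3, 64, 64)  # (batch, views, C, H, W)
# >>> merge_first_two_dims(t).shape     # -> torch.Size([8, 3, 64, 64])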
def train(config, data, model, criterion, optim, epoch, output_dir,
writer_dict, **kwargs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
avg_acc = AverageMeter()
model.train()
end = time.time()
for i, (input_, target_, weight_, meta_) in enumerate(data):
data_time.update(time.time() - end)
output, extra = model(input_, **meta_)
input = merge_first_two_dims(input_)
target = merge_first_two_dims(target_)
weight = merge_first_two_dims(weight_)
meta = dict()
for kk in meta_:
meta[kk] = merge_first_two_dims(meta_[kk])
target_cuda = target.cuda()
weight_cuda = weight.cuda()
loss = 0
b_imu_fuse = extra['imu_fuse']
if b_imu_fuse:
loss += 0.5 * criterion(extra['origin_hms'], target_cuda, weight_cuda)
target_mask = torch.as_tensor(target_cuda > 0.001, dtype=torch.float32).cuda()
# NOTE: the original snippet referenced an undefined `heatmaps` variable here;
# assuming the IMU-fused heatmaps are the ones to be masked.
imu_masked = extra['fused_hms'] * target_mask
target_imu_joint = target_cuda * extra['joint_channel_mask'][0]
loss += 0.5 * criterion(imu_masked, target_imu_joint, weight_cuda)
else:
loss += criterion(extra['origin_hms'], target_cuda, weight_cuda)
optim.zero_grad()
loss.backward()
optim.step()
losses.update(loss.item(), len(input) * input[0].size(0))
_, acc, cnt, pre = accuracy(output.detach().cpu().numpy(), target.detach().cpu().numpy())
avg_acc.update(acc, cnt)
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
gpu_memory_usage = torch.cuda.memory_allocated(0)
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
'Memory {memory:.1f}'.format(
epoch, i, len(data), batch_time=batch_time,
speed=input.shape[0] / batch_time.val,
data_time=data_time, loss=losses, acc=avg_acc, memory=gpu_memory_usage)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', avg_acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
# for k in range(len(input)):
view_name = 'view_{}'.format(0)
prefix = '{}_{}_{:08}'.format(
os.path.join(output_dir, 'train'), view_name, i)
meta_for_debug_imgs = dict()
meta_for_debug_imgs['joints_vis'] = meta['joints_vis']
meta_for_debug_imgs['joints_2d_transformed'] = meta['joints_2d_transformed']
save_debug_images(config, input, meta_for_debug_imgs, target,
pre * 4, extra['origin_hms'], prefix)
if extra is not None and 'fused_hms' in extra:
fuse_hm = extra['fused_hms']
prefix = '{}_{}_{:08}'.format(
os.path.join(output_dir, 'fused_hms'), view_name, i)
save_debug_heatmaps(config, input, meta_for_debug_imgs, target,
pre * 4, fuse_hm, prefix)
def validate(config, loader, dataset, model, criterion, output_dir,
writer_dict=None, **kwargs):
model.eval()
batch_time = AverageMeter()
losses = AverageMeter()
avg_acc = AverageMeter()
nview = len(config.SELECTED_VIEWS)
nsamples = len(dataset) * nview
njoints = config.NETWORK.NUM_JOINTS
height = int(config.NETWORK.HEATMAP_SIZE[0])
width = int(config.NETWORK.HEATMAP_SIZE[1])
all_preds = np.zeros((nsamples, njoints, 3), dtype=np.float32)
all_heatmaps = np.zeros(
(nsamples, njoints, height, width), dtype=np.float32)
idx = 0
with torch.no_grad():
end = time.time()
for i, (input_, target_, weight_, meta_) in enumerate(loader):
batch = input_.shape[0]
output, extra = model(input_, **meta_)
input = merge_first_two_dims(input_)
target = merge_first_two_dims(target_)
weight = merge_first_two_dims(weight_)
meta = dict()
for kk in meta_:
meta[kk] = merge_first_two_dims(meta_[kk])
target_cuda = target.cuda()
weight_cuda = weight.cuda()
loss = criterion(output, target_cuda, weight_cuda)
nimgs = input.size()[0]
losses.update(loss.item(), nimgs)
_, acc, cnt, pre = accuracy(output.detach().cpu().numpy(), target.detach().cpu().numpy(), thr=0.083)
avg_acc.update(acc, cnt)
batch_time.update(time.time() - end)
end = time.time()
pred, maxval = get_final_preds(config,
output.clone().cpu().numpy(),
meta['center'],
meta['scale'])
pred = pred[:, :, 0:2]
pred = np.concatenate((pred, maxval), axis=2)
all_preds[idx:idx + nimgs] = pred
all_heatmaps[idx:idx + nimgs] = output.cpu().numpy()
# image_only_heatmaps[idx:idx + nimgs] = img_detected.cpu().numpy()
idx += nimgs
if i % config.PRINT_FREQ == 0:
msg = 'Test: [{0}/{1}]\t' \
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(loader), batch_time=batch_time,
loss=losses, acc=avg_acc)
logger.info(msg)
view_name = 'view_{}'.format(0)
prefix = '{}_{}_{:08}'.format(
os.path.join(output_dir, 'validation'), view_name, i)
meta_for_debug_imgs = dict()
meta_for_debug_imgs['joints_vis'] = meta['joints_vis']
meta_for_debug_imgs['joints_2d_transformed'] = meta['joints_2d_transformed']
save_debug_images(config, input, meta_for_debug_imgs, target,
pre * 4, extra['origin_hms'], prefix)
if 'fused_hms' in extra:
fused_hms = extra['fused_hms']
prefix = '{}_{}_{:08}'.format(
os.path.join(output_dir, 'fused_hms'), view_name, i)
save_debug_heatmaps(config, input, meta_for_debug_imgs, target,
pre * 4, fused_hms, prefix)
detection_thresholds = [0.075, 0.05, 0.025, 0.0125] # 150,100,50,25 mm
perf_indicators = []
cur_time = time.strftime("%Y-%m-%d-%H-%M", time.gmtime())
for thresh in detection_thresholds:
name_value, perf_indicator, per_grouping_detected = dataset.evaluate(all_preds, threshold=thresh)
perf_indicators.append(perf_indicator)
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
_, full_arch_name = get_model_name(config)
logger.info('Detection Threshold set to {} aka {}mm'.format(thresh, thresh * 2000.0))
logger.info('| Arch ' +
' '.join(['| {: <5}'.format(name) for name in names]) + ' |')
logger.info('|--------' * (num_values + 1) + '|')
logger.info('| ' + '------ ' +
' '.join(['| {:.4f}'.format(value) for value in values]) +
' |')
logger.info('| ' + full_arch_name)
logger.info('Overall Perf on threshold {} is {}\n'.format(thresh, perf_indicator))
logger.info('\n')
if per_grouping_detected is not None:
df = pd.DataFrame(per_grouping_detected)
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
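# Illustrative check (toy 4x3 window, w=3): `complete=True` needs at least 2**w fully
# observed reads, while imputation mode needs 2**(w-2) complete reads plus one read
# missing exactly one site.
# >>> win = np.array([[1, 0, 1], [0, 0, 1], [1, 1, 1], [1, 0, np.nan]])
# >>> enough_reads(win, w=3, complete=True)   # False: 3 complete reads < 8
# >>> enough_reads(win, w=3, complete=False)  # True: 3 >= 2 and one read has a single NaN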
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
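# Worked example (toy patterns, not from the data): for w=3 patterns 101 vs 100 the
# Hamming distance is 1, while the weighted-degree-kernel distance also scores the
# mismatching substrings of every length, weighted by (w - k).
# >>> p1, p2 = np.array([1, 0, 1]), np.array([1, 0, 0])
# >>> Ham_d(p1, p2)  # -> 1
# >>> WDK_d(p1, p2)  # -> 3 (2 from the length-1 mismatch, 1 from the length-2 mismatch)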
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
phylotree=np.append(np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3)),[1.5,1.5])
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
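# With pattern proportions p_i = count_i/m, the two branches above reduce to:
#   MeH==4 (entropy):          score = -(1/w) * sum_i p_i * log2(p_i)
#   MeH==5 (epipolymorphism):  score = 1 - sum_i p_i**2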
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
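# Strategy implemented below for reads that miss exactly one CpG in the
# window: if every read covering that position carries the same value, copy
# that consensus value; otherwise copy the value from a randomly chosen
# fully observed read that agrees with the partial read at the other w-1
# positions; if no such read exists, sample uniformly from the values
# observed at that position.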
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
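# Row k of all_pos is the binary expansion of k, with bit i (the i-th CpG
# in the window) stored in column i, so the rows enumerate all 2**w
# methylation patterns in the same order used by the pattern counters above.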
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
import os
import warnings
from collections import OrderedDict
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from sklearn.exceptions import NotFittedError, UndefinedMetricWarning
from sklearn.preprocessing import label_binarize
from evalml.exceptions import NoPositiveLabelException
from evalml.model_understanding.graphs import (
binary_objective_vs_threshold,
calculate_permutation_importance,
confusion_matrix,
decision_tree_data_from_estimator,
decision_tree_data_from_pipeline,
get_linear_coefficients,
get_prediction_vs_actual_data,
get_prediction_vs_actual_over_time_data,
graph_binary_objective_vs_threshold,
graph_confusion_matrix,
graph_partial_dependence,
graph_permutation_importance,
graph_precision_recall_curve,
graph_prediction_vs_actual,
graph_prediction_vs_actual_over_time,
graph_roc_curve,
graph_t_sne,
normalize_confusion_matrix,
precision_recall_curve,
roc_curve,
t_sne,
visualize_decision_tree,
)
from evalml.objectives import CostBenefitMatrix
from evalml.pipelines import (
BinaryClassificationPipeline,
DecisionTreeRegressor,
ElasticNetRegressor,
LinearRegressor,
MulticlassClassificationPipeline,
RegressionPipeline,
TimeSeriesRegressionPipeline,
)
from evalml.problem_types import ProblemTypes
from evalml.utils import get_random_state, infer_feature_types
@pytest.fixture
def test_pipeline():
class TestPipeline(BinaryClassificationPipeline):
component_graph = [
"Simple Imputer",
"One Hot Encoder",
"Standard Scaler",
"Logistic Regression Classifier",
]
def __init__(self, parameters):
super().__init__(self.component_graph, parameters=parameters)
return TestPipeline(parameters={"Logistic Regression Classifier": {"n_jobs": 1}})
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_confusion_matrix(data_type, make_data_type):
y_true = np.array([2, 0, 2, 2, 0, 1, 1, 0, 2])
y_predicted = np.array([0, 0, 2, 2, 0, 2, 1, 1, 1])
y_true = make_data_type(data_type, y_true)
y_predicted = make_data_type(data_type, y_predicted)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method=None)
conf_mat_expected = np.array([[2, 1, 0], [0, 1, 1], [1, 1, 2]])
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="all")
conf_mat_expected = conf_mat_expected / 9.0
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="true")
conf_mat_expected = np.array(
[[2 / 3.0, 1 / 3.0, 0], [0, 0.5, 0.5], [0.25, 0.25, 0.5]]
)
assert np.array_equal(conf_mat_expected, conf_mat.to_numpy())
assert isinstance(conf_mat, pd.DataFrame)
conf_mat = confusion_matrix(y_true, y_predicted, normalize_method="pred")
conf_mat_expected = np.array(
[[2 / 3.0, 1 / 3.0, 0], [0, 1 / 3.0, 1 / 3.0], [1 / 3.0, 1 / 3.0, 2 / 3.0]]
)
assert np.allclose(conf_mat_expected, conf_mat.to_numpy(), equal_nan=True)
assert isinstance(conf_mat, pd.DataFrame)
with pytest.raises(ValueError, match="Invalid value provided"):
conf_mat = confusion_matrix(
y_true, y_predicted, normalize_method="Invalid Option"
)
@pytest.mark.parametrize("data_type", ["ww", "np", "pd"])
def test_normalize_confusion_matrix(data_type, make_data_type):
conf_mat = np.array([[2, 3, 0], [0, 1, 1], [1, 0, 2]])
conf_mat = make_data_type(data_type, conf_mat)
conf_mat_normalized = normalize_confusion_matrix(conf_mat)
assert all(conf_mat_normalized.sum(axis=1) == 1.0)
assert isinstance(conf_mat_normalized, pd.DataFrame)
conf_mat_normalized = normalize_confusion_matrix(conf_mat, "pred")
for col_sum in conf_mat_normalized.sum(axis=0):
assert col_sum == 1.0 or col_sum == 0.0
conf_mat_normalized = normalize_confusion_matrix(conf_mat, "all")
assert conf_mat_normalized.sum().sum() == 1.0
# testing with named pd.DataFrames
conf_mat_df = pd.DataFrame()
conf_mat_df["col_1"] = [0, 1, 2]
conf_mat_df["col_2"] = [0, 0, 3]
conf_mat_df["col_3"] = [2, 0, 0]
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df)
assert all(conf_mat_normalized.sum(axis=1) == 1.0)
assert list(conf_mat_normalized.columns) == ["col_1", "col_2", "col_3"]
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df, "pred")
for col_sum in conf_mat_normalized.sum(axis=0):
assert col_sum == 1.0 or col_sum == 0.0
conf_mat_normalized = normalize_confusion_matrix(conf_mat_df, "all")
assert conf_mat_normalized.sum().sum() == 1.0
@pytest.mark.parametrize("data_type", ["ww", "np", "pd"])
def test_normalize_confusion_matrix_error(data_type, make_data_type):
conf_mat = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
conf_mat = make_data_type(data_type, conf_mat)
warnings.simplefilter("default", category=RuntimeWarning)
with pytest.raises(
ValueError,
match='Invalid value provided for "normalize_method": invalid option',
):
normalize_confusion_matrix(conf_mat, normalize_method="invalid option")
with pytest.raises(ValueError, match="Invalid value provided"):
normalize_confusion_matrix(conf_mat, normalize_method=None)
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "true")
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "pred")
with pytest.raises(ValueError, match="Sum of given axis is 0"):
normalize_confusion_matrix(conf_mat, "all")
@pytest.mark.parametrize("data_type", ["ww", "pd", "np"])
def test_confusion_matrix_labels(data_type, make_data_type):
y_true = np.array([True, False, True, True, False, False])
y_pred = np.array([False, False, True, True, False, False])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [False, True]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array([0, 1, 0, 1, 0, 1])
y_pred = np.array([0, 1, 1, 1, 1, 1])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [0, 1]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array(["blue", "red", "blue", "red"])
y_pred = np.array(["blue", "red", "red", "red"])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = ["blue", "red"]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array(["blue", "red", "red", "red", "orange", "orange"])
y_pred = np.array(["red", "blue", "blue", "red", "orange", "orange"])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = ["blue", "orange", "red"]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
y_true = np.array([0, 1, 2, 1, 2, 1, 2, 3])
y_pred = np.array([0, 1, 1, 1, 1, 1, 3, 3])
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
conf_mat = confusion_matrix(y_true=y_true, y_predicted=y_pred)
labels = [0, 1, 2, 3]
assert np.array_equal(conf_mat.index, labels)
assert np.array_equal(conf_mat.columns, labels)
@pytest.fixture
def binarized_ys(X_y_multi):
_, y_true = X_y_multi
rs = get_random_state(42)
y_tr = label_binarize(y_true, classes=[0, 1, 2])
y_pred_proba = y_tr * rs.random(y_tr.shape)
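# Scaling the one-hot labels by random noise leaves nonzero scores only in
# each sample's true-class column, which appears to keep every one-vs-rest
# ROC curve in the graph tests well defined and perfectly separable.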
return y_true, y_tr, y_pred_proba
def test_precision_recall_curve_return_type():
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
precision_recall_curve_data = precision_recall_curve(y_true, y_predict_proba)
assert isinstance(precision_recall_curve_data["precision"], np.ndarray)
assert isinstance(precision_recall_curve_data["recall"], np.ndarray)
assert isinstance(precision_recall_curve_data["thresholds"], np.ndarray)
assert isinstance(precision_recall_curve_data["auc_score"], float)
@pytest.mark.parametrize("data_type", ["np", "pd", "pd2d", "li", "ww"])
def test_precision_recall_curve(data_type, make_data_type):
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
if data_type == "pd2d":
data_type = "pd"
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
precision_recall_curve_data = precision_recall_curve(y_true, y_predict_proba)
precision = precision_recall_curve_data.get("precision")
recall = precision_recall_curve_data.get("recall")
thresholds = precision_recall_curve_data.get("thresholds")
precision_expected = np.array([0.66666667, 0.5, 1, 1])
recall_expected = np.array([1, 0.5, 0.5, 0])
thresholds_expected = np.array([0.35, 0.4, 0.8])
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
def test_precision_recall_curve_pos_label_idx():
y_true = pd.Series(np.array([0, 0, 1, 1]))
y_predict_proba = pd.DataFrame(
np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
)
precision_recall_curve_data = precision_recall_curve(
y_true, y_predict_proba, pos_label_idx=1
)
precision = precision_recall_curve_data.get("precision")
recall = precision_recall_curve_data.get("recall")
thresholds = precision_recall_curve_data.get("thresholds")
precision_expected = np.array([0.66666667, 0.5, 1, 1])
recall_expected = np.array([1, 0.5, 0.5, 0])
thresholds_expected = np.array([0.35, 0.4, 0.8])
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
y_predict_proba = pd.DataFrame(
np.array([[0.1, 0.9], [0.4, 0.6], [0.35, 0.65], [0.8, 0.2]])
)
precision_recall_curve_data = precision_recall_curve(
y_true, y_predict_proba, pos_label_idx=0
)
np.testing.assert_almost_equal(precision_expected, precision, decimal=5)
np.testing.assert_almost_equal(recall_expected, recall, decimal=5)
np.testing.assert_almost_equal(thresholds_expected, thresholds, decimal=5)
def test_precision_recall_curve_pos_label_idx_error(make_data_type):
y_true = np.array([0, 0, 1, 1])
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
with pytest.raises(
NoPositiveLabelException,
match="Predicted probabilities of shape \\(4, 2\\) don't contain a column at index 9001",
):
precision_recall_curve(y_true, y_predict_proba, pos_label_idx=9001)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_precision_recall_curve(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
X = make_data_type(data_type, X)
y_true = make_data_type(data_type, y_true)
fig = graph_precision_recall_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Precision-Recall"
assert len(fig_dict["data"]) == 1
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
assert np.array_equal(
fig_dict["data"][0]["x"], precision_recall_curve_data["recall"]
)
assert np.array_equal(
fig_dict["data"][0]["y"], precision_recall_curve_data["precision"]
)
assert fig_dict["data"][0]["name"] == "Precision-Recall (AUC {:06f})".format(
precision_recall_curve_data["auc_score"]
)
def test_graph_precision_recall_curve_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
fig = graph_precision_recall_curve(
y_true, y_pred_proba, title_addition="with added title text"
)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Precision-Recall with added title text"
)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_roc_curve_binary(data_type, make_data_type):
y_true = np.array([1, 1, 0, 0])
y_predict_proba = np.array([0.1, 0.4, 0.35, 0.8])
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
roc_curve_data = roc_curve(y_true, y_predict_proba)[0]
fpr_rates = roc_curve_data.get("fpr_rates")
tpr_rates = roc_curve_data.get("tpr_rates")
thresholds = roc_curve_data.get("thresholds")
auc_score = roc_curve_data.get("auc_score")
fpr_expected = np.array([0, 0.5, 0.5, 1, 1])
tpr_expected = np.array([0, 0, 0.5, 0.5, 1])
thresholds_expected = np.array([1.8, 0.8, 0.4, 0.35, 0.1])
assert np.array_equal(fpr_expected, fpr_rates)
assert np.array_equal(tpr_expected, tpr_rates)
assert np.array_equal(thresholds_expected, thresholds)
assert auc_score == pytest.approx(0.25, 1e-5)
assert isinstance(roc_curve_data["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["thresholds"], np.ndarray)
y_true = np.array([1, 1, 0, 0])
y_predict_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.65, 0.35], [0.2, 0.8]])
y_predict_proba = make_data_type(data_type, y_predict_proba)
y_true = make_data_type(data_type, y_true)
roc_curve_data = roc_curve(y_true, y_predict_proba)[0]
fpr_rates = roc_curve_data.get("fpr_rates")
tpr_rates = roc_curve_data.get("tpr_rates")
thresholds = roc_curve_data.get("thresholds")
auc_score = roc_curve_data.get("auc_score")
fpr_expected = np.array([0, 0.5, 0.5, 1, 1])
tpr_expected = np.array([0, 0, 0.5, 0.5, 1])
thresholds_expected = np.array([1.8, 0.8, 0.4, 0.35, 0.1])
assert np.array_equal(fpr_expected, fpr_rates)
assert np.array_equal(tpr_expected, tpr_rates)
assert np.array_equal(thresholds_expected, thresholds)
assert auc_score == pytest.approx(0.25, 1e-5)
assert isinstance(roc_curve_data["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data["thresholds"], np.ndarray)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_roc_curve_multiclass(data_type, make_data_type):
y_true = np.array([1, 2, 0, 0, 2, 1])
y_predict_proba = np.array(
[
[0.33, 0.33, 0.33],
[0.05, 0.05, 0.90],
[0.75, 0.15, 0.10],
[0.8, 0.1, 0.1],
[0.1, 0.1, 0.8],
[0.3, 0.4, 0.3],
]
)
y_true = make_data_type(data_type, y_true)
y_predict_proba = make_data_type(data_type, y_predict_proba)
roc_curve_data = roc_curve(y_true, y_predict_proba)
fpr_expected = [[0, 0, 0, 1], [0, 0, 0, 0.25, 0.75, 1], [0, 0, 0, 0.5, 1]]
tpr_expected = [[0, 0.5, 1, 1], [0, 0.5, 1, 1, 1, 1], [0, 0.5, 1, 1, 1]]
thresholds_expected = [
[1.8, 0.8, 0.75, 0.05],
[1.4, 0.4, 0.33, 0.15, 0.1, 0.05],
[1.9, 0.9, 0.8, 0.3, 0.1],
]
auc_expected = [1, 1, 1]
y_true_unique = y_true
if data_type == "ww":
y_true_unique = y_true
for i in np.unique(y_true_unique):
fpr_rates = roc_curve_data[i].get("fpr_rates")
tpr_rates = roc_curve_data[i].get("tpr_rates")
thresholds = roc_curve_data[i].get("thresholds")
auc_score = roc_curve_data[i].get("auc_score")
assert np.array_equal(fpr_expected[i], fpr_rates)
assert np.array_equal(tpr_expected[i], tpr_rates)
assert np.array_equal(thresholds_expected[i], thresholds)
assert auc_expected[i] == pytest.approx(auc_score, 1e-5)
assert isinstance(roc_curve_data[i]["fpr_rates"], np.ndarray)
assert isinstance(roc_curve_data[i]["tpr_rates"], np.ndarray)
assert isinstance(roc_curve_data[i]["thresholds"], np.ndarray)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_roc_curve_binary(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
y_true = make_data_type(data_type, y_true)
y_pred_proba = make_data_type(data_type, y_pred_proba)
fig = graph_roc_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
assert len(fig_dict["data"]) == 2
roc_curve_data = roc_curve(y_true, y_pred_proba)[0]
assert np.array_equal(fig_dict["data"][0]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][0]["y"], roc_curve_data["tpr_rates"])
assert np.allclose(
np.array(fig_dict["data"][0]["text"]).astype(float),
np.array(roc_curve_data["thresholds"]).astype(float),
)
assert fig_dict["data"][0]["name"] == "Class 1 (AUC {:06f})".format(
roc_curve_data["auc_score"]
)
assert np.array_equal(fig_dict["data"][1]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][1]["y"], np.array([0, 1]))
assert fig_dict["data"][1]["name"] == "Trivial Model (AUC 0.5)"
def test_graph_roc_curve_nans():
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
one_val_y_zero = np.array([0])
with pytest.warns(UndefinedMetricWarning):
fig = graph_roc_curve(one_val_y_zero, one_val_y_zero)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert np.array_equal(fig_dict["data"][0]["x"], np.array([0.0, 1.0]))
assert np.allclose(
fig_dict["data"][0]["y"], np.array([np.nan, np.nan]), equal_nan=True
)
fig1 = graph_roc_curve(
np.array([np.nan, 1, 1, 0, 1]), np.array([0, 0, 0.5, 0.1, 0.9])
)
fig2 = graph_roc_curve(
np.array([1, 0, 1, 0, 1]), np.array([0, np.nan, 0.5, 0.1, 0.9])
)
assert fig1 == fig2
def test_graph_roc_curve_multiclass(binarized_ys):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true, y_tr, y_pred_proba = binarized_ys
fig = graph_roc_curve(y_true, y_pred_proba)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
assert len(fig_dict["data"]) == 4
for i in range(3):
roc_curve_data = roc_curve(y_tr[:, i], y_pred_proba[:, i])[0]
assert np.array_equal(fig_dict["data"][i]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][i]["y"], roc_curve_data["tpr_rates"])
assert np.allclose(
np.array(fig_dict["data"][i]["text"]).astype(float),
np.array(roc_curve_data["thresholds"]).astype(float),
)
assert fig_dict["data"][i]["name"] == "Class {name} (AUC {:06f})".format(
roc_curve_data["auc_score"], name=i + 1
)
assert np.array_equal(fig_dict["data"][3]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][3]["y"], np.array([0, 1]))
assert fig_dict["data"][3]["name"] == "Trivial Model (AUC 0.5)"
with pytest.raises(
ValueError,
match="Number of custom class names does not match number of classes",
):
graph_roc_curve(y_true, y_pred_proba, custom_class_names=["one", "two"])
def test_graph_roc_curve_multiclass_custom_class_names(binarized_ys):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true, y_tr, y_pred_proba = binarized_ys
custom_class_names = ["one", "two", "three"]
fig = graph_roc_curve(y_true, y_pred_proba, custom_class_names=custom_class_names)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Receiver Operating Characteristic"
for i in range(3):
roc_curve_data = roc_curve(y_tr[:, i], y_pred_proba[:, i])[0]
assert np.array_equal(fig_dict["data"][i]["x"], roc_curve_data["fpr_rates"])
assert np.array_equal(fig_dict["data"][i]["y"], roc_curve_data["tpr_rates"])
assert fig_dict["data"][i]["name"] == "Class {name} (AUC {:06f})".format(
roc_curve_data["auc_score"], name=custom_class_names[i]
)
assert np.array_equal(fig_dict["data"][3]["x"], np.array([0, 1]))
assert np.array_equal(fig_dict["data"][3]["y"], np.array([0, 1]))
assert fig_dict["data"][3]["name"] == "Trivial Model (AUC 0.5)"
def test_graph_roc_curve_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred_proba = y_true * rs.random(y_true.shape)
fig = graph_roc_curve(y_true, y_pred_proba, title_addition="with added title text")
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== "Receiver Operating Characteristic with added title text"
)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_graph_confusion_matrix_default(X_y_binary, data_type, make_data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
y_true = make_data_type(data_type, y_true)
y_pred = make_data_type(data_type, y_pred)
fig = graph_confusion_matrix(y_true, y_pred)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== 'Confusion matrix, normalized using method "true"'
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Predicted Label"
assert np.all(fig_dict["layout"]["xaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "True Label"
assert np.all(fig_dict["layout"]["yaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["autorange"] == "reversed"
heatmap = fig_dict["data"][0]
conf_mat = confusion_matrix(y_true, y_pred, normalize_method="true")
conf_mat_unnormalized = confusion_matrix(y_true, y_pred, normalize_method=None)
assert np.array_equal(heatmap["x"], conf_mat.columns)
assert np.array_equal(heatmap["y"], conf_mat.columns)
assert np.array_equal(heatmap["z"], conf_mat)
assert np.array_equal(heatmap["customdata"], conf_mat_unnormalized)
assert (
heatmap["hovertemplate"]
== "<b>True</b>: %{y}<br><b>Predicted</b>: %{x}<br><b>Normalized Count</b>: %{z}<br><b>Raw Count</b>: %{customdata} <br><extra></extra>"
)
annotations = fig.__dict__["_layout_obj"]["annotations"]
# check that the figure has text annotations for the confusion matrix
for i in range(len(annotations)):
assert "text" in annotations[i]
def test_graph_confusion_matrix_norm_disabled(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
fig = graph_confusion_matrix(y_true, y_pred, normalize_method=None)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert fig_dict["layout"]["title"]["text"] == "Confusion matrix"
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Predicted Label"
assert np.all(fig_dict["layout"]["xaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "True Label"
assert np.all(fig_dict["layout"]["yaxis"]["tickvals"] == np.array([0, 1]))
assert fig_dict["layout"]["yaxis"]["autorange"] == "reversed"
heatmap = fig_dict["data"][0]
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(y_true, y_pred, normalize_method="true")
assert np.array_equal(heatmap["x"], conf_mat.columns)
assert np.array_equal(heatmap["y"], conf_mat.columns)
assert np.array_equal(heatmap["z"], conf_mat)
assert np.array_equal(heatmap["customdata"], conf_mat_normalized)
assert (
heatmap["hovertemplate"]
== "<b>True</b>: %{y}<br><b>Predicted</b>: %{x}<br><b>Raw Count</b>: %{z}<br><b>Normalized Count</b>: %{customdata} <br><extra></extra>"
)
def test_graph_confusion_matrix_title_addition(X_y_binary):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y_true = X_y_binary
rs = get_random_state(42)
y_pred = np.round(y_true * rs.random(y_true.shape)).astype(int)
fig = graph_confusion_matrix(y_true, y_pred, title_addition="with added title text")
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"]
== 'Confusion matrix with added title text, normalized using method "true"'
)
def test_graph_permutation_importance(X_y_binary, test_pipeline):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
clf = test_pipeline
clf.fit(X, y)
fig = graph_permutation_importance(test_pipeline, X, y, "Log Loss Binary")
assert isinstance(fig, go.Figure)
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Permutation Importance<br><sub>"
"The relative importance of each input feature's overall "
"influence on the pipelines' predictions, computed using the "
"permutation importance algorithm.</sub>"
)
assert len(fig_dict["data"]) == 1
perm_importance_data = calculate_permutation_importance(
clf, X, y, "Log Loss Binary"
)
assert np.array_equal(
fig_dict["data"][0]["x"][::-1], perm_importance_data["importance"].values
)
assert np.array_equal(
fig_dict["data"][0]["y"][::-1], perm_importance_data["feature"]
)
@patch("evalml.model_understanding.graphs.calculate_permutation_importance")
def test_graph_permutation_importance_show_all_features(mock_perm_importance):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
mock_perm_importance.return_value = pd.DataFrame(
{"feature": ["f1", "f2"], "importance": [0.0, 0.6]}
)
figure = graph_permutation_importance(
test_pipeline, pd.DataFrame(), pd.Series(), "Log Loss Binary"
)
assert isinstance(figure, go.Figure)
data = figure.data[0]
assert np.any(data["x"] == 0.0)
@patch("evalml.model_understanding.graphs.calculate_permutation_importance")
def test_graph_permutation_importance_threshold(mock_perm_importance):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
mock_perm_importance.return_value = pd.DataFrame(
{"feature": ["f1", "f2"], "importance": [0.0, 0.6]}
)
with pytest.raises(
ValueError,
match="Provided importance threshold of -0.1 must be greater than or equal to 0",
):
fig = graph_permutation_importance(
test_pipeline,
pd.DataFrame(),
pd.Series(),
"Log Loss Binary",
importance_threshold=-0.1,
)
fig = graph_permutation_importance(
test_pipeline,
pd.DataFrame(),
pd.Series(),
"Log Loss Binary",
importance_threshold=0.5,
)
assert isinstance(fig, go.Figure)
data = fig.data[0]
assert np.all(data["x"] >= 0.5)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_cost_benefit_matrix_vs_threshold(
data_type, X_y_binary, logistic_regression_binary_pipeline_class, make_data_type
):
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
original_pipeline_threshold = pipeline.threshold
cost_benefit_df = binary_objective_vs_threshold(pipeline, X, y, cbm, steps=5)
assert list(cost_benefit_df.columns) == ["threshold", "score"]
assert cost_benefit_df.shape == (6, 2)
assert not cost_benefit_df.isnull().all().all()
assert pipeline.threshold == original_pipeline_threshold
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_binary_objective_vs_threshold(
data_type, X_y_binary, logistic_regression_binary_pipeline_class, make_data_type
):
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
# test objective with score_needs_proba == True
with pytest.raises(ValueError, match="Objective `score_needs_proba` must be False"):
binary_objective_vs_threshold(pipeline, X, y, "Log Loss Binary")
# test with non-binary objective
with pytest.raises(
ValueError, match="can only be calculated for binary classification objectives"
):
binary_objective_vs_threshold(pipeline, X, y, "f1 micro")
# test objective with score_needs_proba == False
results_df = binary_objective_vs_threshold(pipeline, X, y, "f1", steps=5)
assert list(results_df.columns) == ["threshold", "score"]
assert results_df.shape == (6, 2)
assert not results_df.isnull().all().all()
@patch("evalml.pipelines.BinaryClassificationPipeline.score")
def test_binary_objective_vs_threshold_steps(
mock_score, X_y_binary, logistic_regression_binary_pipeline_class
):
X, y = X_y_binary
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
pipeline.fit(X, y)
mock_score.return_value = {"Cost Benefit Matrix": 0.2}
cost_benefit_df = binary_objective_vs_threshold(pipeline, X, y, cbm, steps=234)
mock_score.assert_called()
assert list(cost_benefit_df.columns) == ["threshold", "score"]
assert cost_benefit_df.shape == (235, 2)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
@patch("evalml.model_understanding.graphs.binary_objective_vs_threshold")
def test_graph_binary_objective_vs_threshold(
mock_cb_thresholds,
data_type,
X_y_binary,
logistic_regression_binary_pipeline_class,
make_data_type,
):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
pipeline = logistic_regression_binary_pipeline_class(parameters={})
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
mock_cb_thresholds.return_value = pd.DataFrame(
{"threshold": [0, 0.5, 1.0], "score": [100, -20, 5]}
)
figure = graph_binary_objective_vs_threshold(pipeline, X, y, cbm)
assert isinstance(figure, go.Figure)
data = figure.data[0]
assert not np.any(np.isnan(data["x"]))
assert not np.any(np.isnan(data["y"]))
assert np.array_equal(data["x"], mock_cb_thresholds.return_value["threshold"])
assert np.array_equal(data["y"], mock_cb_thresholds.return_value["score"])
@patch("evalml.model_understanding.graphs.jupyter_check")
@patch("evalml.model_understanding.graphs.import_or_raise")
def test_jupyter_graph_check(
import_check, jupyter_check, X_y_binary, X_y_regression, test_pipeline
):
pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
X, y = X_y_binary
X = X[:20, :5]
y = y[:20]
clf = test_pipeline
clf.fit(X, y)
cbm = CostBenefitMatrix(
true_positive=1, true_negative=-1, false_positive=-7, false_negative=-2
)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
graph_permutation_importance(test_pipeline, X, y, "log loss binary")
assert len(graph_valid) == 0
with pytest.warns(None) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
graph_partial_dependence(clf, X, features=0, grid_resolution=20)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_binary_objective_vs_threshold(test_pipeline, X, y, cbm, steps=5)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_precision_recall_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_permutation_importance(test_pipeline, X, y, "log loss binary")
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_roc_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
Xr, yr = X_y_regression
with pytest.warns(None) as graph_valid:
rs = get_random_state(42)
y_preds = yr * rs.random(yr.shape)
graph_prediction_vs_actual(yr, y_preds)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
@pytest.mark.parametrize("data_type", ["np", "pd", "ww"])
def test_get_prediction_vs_actual_data(data_type, make_data_type):
y_true = np.array([1, 2, 3000, 4, 5, 6, 7, 8, 9, 10, 11, 12])
y_pred = np.array([5, 4, 2, 8, 6, 6, 5, 1, 7, 2, 1, 3000])
y_true_in = make_data_type(data_type, y_true)
y_pred_in = make_data_type(data_type, y_pred)
with pytest.raises(ValueError, match="Threshold must be positive!"):
get_prediction_vs_actual_data(y_true_in, y_pred_in, outlier_threshold=-1)
outlier_loc = [2, 11]
results = get_prediction_vs_actual_data(
y_true_in, y_pred_in, outlier_threshold=2000
)
assert isinstance(results, pd.DataFrame)
assert np.array_equal(results["prediction"], y_pred)
assert np.array_equal(results["actual"], y_true)
for i, value in enumerate(results["outlier"]):
if i in outlier_loc:
assert value == "#ffff00"
else:
assert value == "#0000ff"
results = get_prediction_vs_actual_data(y_true_in, y_pred_in)
assert isinstance(results, pd.DataFrame)
assert np.array_equal(results["prediction"], y_pred)
assert np.array_equal(results["actual"], y_true)
assert (results["outlier"] == "#0000ff").all()
def test_graph_prediction_vs_actual_default():
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true = [1, 2, 3000, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_pred = [5, 4, 2, 8, 6, 6, 5, 1, 7, 2, 1, 3000]
fig = graph_prediction_vs_actual(y_true, y_pred)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Predicted vs Actual Values Scatter Plot"
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Prediction"
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "Actual"
assert len(fig_dict["data"]) == 2
assert fig_dict["data"][0]["name"] == "y = x line"
assert fig_dict["data"][0]["x"] == fig_dict["data"][0]["y"]
assert len(fig_dict["data"][1]["x"]) == len(y_true)
assert fig_dict["data"][1]["marker"]["color"] == "#0000ff"
assert fig_dict["data"][1]["name"] == "Values"
@pytest.mark.parametrize("data_type", ["pd", "ww"])
def test_graph_prediction_vs_actual(data_type):
go = pytest.importorskip(
"plotly.graph_objects",
reason="Skipping plotting test because plotly not installed",
)
y_true = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_pred = [5, 4, 3, 8, 6, 3, 5, 9, 7, 12, 1, 2]
with pytest.raises(ValueError, match="Threshold must be positive!"):
graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=-1)
fig = graph_prediction_vs_actual(y_true, y_pred, outlier_threshold=100)
assert isinstance(fig, type(go.Figure()))
fig_dict = fig.to_dict()
assert (
fig_dict["layout"]["title"]["text"] == "Predicted vs Actual Values Scatter Plot"
)
assert fig_dict["layout"]["xaxis"]["title"]["text"] == "Prediction"
assert fig_dict["layout"]["yaxis"]["title"]["text"] == "Actual"
assert len(fig_dict["data"]) == 2
assert fig_dict["data"][1]["marker"]["color"] == "#0000ff"
y_true = pd.Series(y_true)
y_pred = pd.Series(y_pred)
import glob
import os
import sys
# these imports and usings need to be in the same order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
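# Backfilling means each survey observation is applied to all earlier,
# unobserved days back to the previous survey, mirroring the treatment of
# the mask-wearing series below.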
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
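# p_force decays linearly from ~1 at the start of the forecast horizon to
# ~0 at the end, so early days follow the recent trend while later days
# are pulled increasingly towards the baseline regression.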
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# after the change point, trend towards a new baseline; currently this is set to the
# pre-scenario mean (mu_baseline) rather than zeros
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# (the commented lines below would scale the transit measure separately so that it returns a little more quickly)
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
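# the microdistancing survey series ends earlier than the Google mobility series,
# so the forecast below must cover that gap (extra_days_md) plus the n_forecast horizon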
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk allows you to manually set the distancing parameters for a state to enable scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
# forecast mask-wearing compliance
# (note: whether to keep forecasting masks may be revisited in future and will need to be assessed)
# Get a baseline value of mask wearing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
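# as with microdistancing, extra_days_masks covers the gap between the last mask-wearing
# survey date and the last Google mobility date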
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk allows you to manually set the distancing parameters for a state to enable scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
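# moment-match a Beta(a, b) distribution to the VE estimate: choosing
# a = m * (m * (1 - m) / var - 1) and b = (1 - m) * (m * (1 - m) / var - 1)
# gives a Beta distribution with mean m and variance var (= var_vax here)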
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# mark the data date (the end of the data we are actually fitting to) with a dash-dot vertical line
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# store the noisier (higher-variance) historical mobility samples and the forecast sims for reuse in the TP calculation below
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the mask-wearing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # this plots the Delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the VE time series and create an adjusted time series from March 1st
# that has no vaccination effect prior to the first VE date
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is a single row of ones over the pre-vaccination dates; it is relabelled for each of the 8 jurisdictions in the loop below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is a single row of ones over the pre-vaccination dates; it is relabelled for each of the 8 jurisdictions in the loop below
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
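# two candidate functional forms for the microdistancing factor: the exponential-decay
# form (1 + theta_md)^(-prop) and a scaled logistic 2 * expit(-theta_md * prop);
# expo_decay selects the former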
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract truncation_days days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# only count days on or after the Omicron start date (the else 0 handles jurisdictions, e.g. QLD, with no Omicron-era days in their fitting window)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# filter df_R by state; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# tile the theta samples to shape (number of dates, mob_samples)
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
# mirror the microdistancing treatment for masks (without this, masks_prop_sim would
# carry over its value from the earlier plotting loop rather than this state's series)
masks_prop_sim = df_masks[state].values
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
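# the posterior VE effects are stored as one flat vector over all states' third-wave
# days; vax_idx_ranges recovers the slice of that vector belonging to each state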
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for Omicron, starting at the later of that state's third-wave start date and the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
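# voc_vacc_product will hold, for each day and posterior sample, the vaccination
# effect multiplied by the relevant variant (VoC) effect; it is filled in per sample below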
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (log-odds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
# concatenate to pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
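# after the loop, logodds has one row per mobility sample and one column per date;
# it is transposed when converted to the macro (mobility) factor 2 * expit(logodds.T) below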
# create a matrix of mob_samples realisations of the VoC effect: the multiplier is 1
# up until the VoC start date and then takes values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
# before Omicron is introduced in a jurisdiction, we consider which period we're in:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
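# R_L has shape (number of dates, mob_samples): each column is one sampled TP
# trajectory for this state and strain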
# under the school-reopening scenario we increase TP by 15% (this code can probably be
# reused, but inferring the effect directly would be difficult given lockdowns and
# various other interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_6_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=12 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last twelve months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
print("============")
print("Saving results")
print("============")
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_12_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
# save values for the functional omicron related proportions for each state
prop_omicron_vars = ("r", "tau", "m0", "m1")
for (kk, state) in enumerate(states):
# filter df_R by state; rows are dates, columns are predictors
df_state = df_R.loc[df_R.state == state].copy()
for v in prop_omicron_vars:
# take the first mob_samples posterior draws of this parameter and save them
y = samples[v + "[" + str(kk + 1) + "]"].values
pd.DataFrame(y[:mob_samples]).to_csv(
results_dir
+ v
+ "_"
+ state
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# now we save the sampled TP paths
# convert the appropriate sampled susceptible depletion factors to a csv and save them for simulation
# NOTE: this does not save an updated median, mean etc. for the R_I's. We don't use them,
# so it's not critical, but it should be noted if we are comparing things later: the step
# change from R_I to R_I_omicron is noticeable and shouldn't be overlooked.
df_Rhats = df_Rhats[
["state", "date", "type", "median", "bottom", "lower", "upper", "top"]
+ [i for i in range(mob_samples)]
]
# save the file as a csv (easier to handle in Julia for now)
df_Rhats.to_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
return None
def calculate_Reff_local(
Reff,
R_I,
R_I_omicron,
voc_effect,
prop_import,
omicron_start_day,
):
"""
Apply the same mixture model idea as per the TP model to get
R_eff^L = (R_eff - rho * RI)/(1 - rho)
and use this to weight the TP historically.
"""
# calculate this in one pass. Note that we set Reff_local to 0 if
# prop_import = 1, as in that instance the relationship breaks down due to division by 0.
Reff_local = np.zeros(shape=Reff.shape[0])
for n in range(len(Reff_local)):
# adjust the Reff based on the time period of interest
if n < omicron_start_day:
R_I_tmp = R_I
else:
R_I_tmp = R_I_omicron * voc_effect
if prop_import[n] < 1:
Reff_local[n] = (Reff[n] - prop_import[n] * R_I_tmp) / (1 - prop_import[n])
else:
Reff_local[n] = 0
# Reff_local = [
# (Reff[t] - prop_import[t] * R_I) / (1 - prop_import[t])
# if prop_import[t] < 1 else -1 for t in range(Reff.shape[0])
# ]
return Reff_local
def adjust_TP(data_date):
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
n_days_nowcast_TP_adjustment,
mob_samples,
)
print("============")
print("Adjusting TP forecasts using data from", data_date)
print("============")
data_date = pd.to_datetime(data_date)
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
sim_start_date = pd.to_datetime(sim_start_date)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start=third_start_date, end=third_end_date).values,
"NT": pd.date_range(start="2021-12-01", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-11-25", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-08-01", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# save the Google data and reload it to drop the datetime dtype
# (dates are re-parsed below)
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
omicron_start_day = (pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)).days
for strain in ("Delta", "Omicron"):
"""
Run adjustment model for the local TP estimates. This will adjust the local component of the
TP
"""
print("=========================")
print("Running TP adjustment model for", strain, "TP")
print("=========================")
df_forecast2 = pd.read_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# read in Reff samples
df_Reff = pd.read_csv(
"results/EpyReff/Reff_"
+ strain
+ "_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["INFECTION_DATES"],
)
inferred_prop_imports = pd.read_csv(
results_dir
+ "rho_samples"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# read in the case data and note that we want this to be infection dates to match up to Reff changes
case_data = read_in_NNDSS(
data_date, apply_delay_at_read=True, apply_inc_at_read=True
)
case_data = case_data[["date_inferred", "STATE", "imported", "local"]]
# this is the forecasted TP dataframe, without R_L type
df_forecast2_new = df_forecast2.loc[df_forecast2.type != "R_L"]
end_date = pd.to_datetime(today) + timedelta(days=num_forecast_days)
states_to_adjust = ["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"]
# read in the samples for weighting between TP and Reff.
samples2 = pd.read_csv(
results_dir
+ "posterior_sample_"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# extract the import values
if strain == "Delta":
R_I = samples2.R_I.to_numpy()
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_delta.to_numpy()
elif strain == "Omicron":
# extract the import values
R_I_omicron = samples2.R_I_omicron.to_numpy()
voc_effect = samples2.voc_effect_omicron.to_numpy()
last_date_for_reff = (
pd.to_datetime(data_date)
- pd.Timedelta(days=truncation_days + n_days_nowcast_TP_adjustment - 1)
)
print("==============")
print("The last date the Reff estimate is used is", last_date_for_reff)
print("==============")
for state in states:
# filter case data by state
case_data_state = case_data.loc[case_data.STATE == state]
# take a sum of cases each day (this does not fill out missing days)
df_cases = case_data_state.groupby(["date_inferred", "STATE"]).agg(sum)
df_cases = df_cases.reset_index()
df_cases = df_cases.set_index("date_inferred")
# now we want to fill out indices by adding 0's on days with 0 cases and ensuring we go right up to the current truncated date
idx = pd.date_range(
pd.to_datetime("2020-03-01"),
last_date_for_reff,
)
is_omicron = np.array(idx >= pd.to_datetime(omicron_start_date))
df_cases = df_cases.reindex(idx, fill_value=0)
# filter the TP and Reff by state
df_forecast2_state_R_L = df_forecast2.loc[
((df_forecast2.state == state) & (df_forecast2.type == "R_L"))
]
df_Reff_state = df_Reff.loc[df_Reff.STATE == state]
# take a rolling average of the cases over the interval of consideration
idx = (pd.to_datetime(df_forecast2_state_R_L.date) >= pd.to_datetime("2020-03-01")) & (
pd.to_datetime(df_forecast2_state_R_L.date) <= last_date_for_reff
)
df_forecast2_state_R_L_sims = df_forecast2_state_R_L.iloc[:, 9:].loc[idx]
Reff = df_Reff_state.loc[
(df_Reff_state.INFECTION_DATES >= pd.to_datetime("2020-03-01"))
& (df_Reff_state.INFECTION_DATES<= last_date_for_reff)
].iloc[:, :-2]
        # take a 7-day moving average of the local cases over the interval of consideration
        ma_period = 7
        df_cases_local = df_cases["local"]
        df_cases_imported = df_cases["imported"]
        df_cases_local_ma = df_cases_local.rolling(ma_period, min_periods=1).mean()
# only want to use indices over the fitting horizon, after this point we rely on the TP model
idx = (df_cases.index >= pd.to_datetime("2020-03-01")) & (
df_cases.index <= last_date_for_reff
)
df_cases_local = df_cases_local[idx]
df_cases_imported = df_cases_imported[idx]
df_cases_local_ma = df_cases_local_ma[idx]
# dictionary to store sampled Rt paths
Rt = {}
ratio_import_to_local = df_cases_imported / (df_cases_local + df_cases_imported)
# set nan or infs to 0
ratio_import_to_local.replace([np.nan, np.inf], 0, inplace=True)
ratio_import_to_local = ratio_import_to_local.rolling(7, min_periods=1).mean()
# now replace the fitted period with the correct proportions
inferred_prop_imports_state = (
inferred_prop_imports
.loc[inferred_prop_imports.state == state]
.iloc[:,1:-1]
.set_index("date")
)
n_Reff_samples = Reff.shape[1]
# loop over the TP paths for a state
for (n, col_str) in enumerate(df_forecast2_state_R_L_sims):
ratio_import_to_local_combined = pd.Series(
inferred_prop_imports_state[str(int(col_str) % mob_samples)][i]
if i in inferred_prop_imports_state.index else ratio_import_to_local[i]
for i in ratio_import_to_local.index
)
ratio_import_to_local_combined.index = ratio_import_to_local.index
ratio_import_to_local_combined = ratio_import_to_local_combined.to_numpy()
if state in states_to_adjust:
# sample a Reff path from EpyReff (there are only 2000 of these)
Reff_sample = Reff.iloc[:, n % n_Reff_samples].to_numpy()
TP_local = np.array(df_forecast2_state_R_L_sims[col_str])
# Index by col_str % n_samples as we would be cycling the values in the R_I
Reff_local = calculate_Reff_local(
Reff_sample,
R_I[int(col_str) % mob_samples],
R_I_omicron[int(col_str) % mob_samples],
voc_effect[int(col_str) % mob_samples],
ratio_import_to_local_combined,
omicron_start_day=omicron_start_day,
)
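                # omega mixes the case-driven Reff with the mobility-model TP below:
                # a Beta(35, L_ma) draw is small when the 7-day average of local cases
                # (L_ma) is large, favouring Reff_local, and omega is fixed at 1 (pure TP)
                # whenever L_ma drops below 5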
omega = pd.Series(
(
np.random.beta(35, L_ma) if L_ma >= 5 else 1
for L_ma in df_cases_local_ma.to_numpy()
),
index=df_cases_local_ma.index,
)
# apply the mixture modelling and the adjustment to ensure we don't get negative
Rt[col_str] = np.maximum(0, (1 - omega) * Reff_local + omega * TP_local)
# store Rt in a dataframe
Rt = pd.DataFrame.from_dict(Rt, orient="index", columns=df_cases_local_ma.index)
        # next step is to stitch the adjusted df back with the forecast TP
idx = pd.to_datetime(df_forecast2_state_R_L.date) > last_date_for_reff
df_forecast2_after = df_forecast2_state_R_L.iloc[:, 9:].loc[idx].T
# concatenate updated df with the forecasted TP
df_full = pd.concat([Rt, df_forecast2_after], axis=1)
# transpose the full df for consistent structuring
df_full = df_full.T
        # calculate the summary statistics as per the original df, using only the sample
        # columns so previously added summary columns don't leak into later quantiles
        sample_vals = df_full.to_numpy()
        df_full["bottom"] = np.percentile(sample_vals, 5, axis=1)
        df_full["lower"] = np.percentile(sample_vals, 25, axis=1)
        df_full["median"] = np.percentile(sample_vals, 50, axis=1)
        df_full["upper"] = np.percentile(sample_vals, 75, axis=1)
        df_full["top"] = np.percentile(sample_vals, 95, axis=1)
        df_full["mean"] = np.mean(sample_vals, axis=1)
        df_full["std"] = np.std(sample_vals, axis=1)
# put date back in
df_full["date"] = pd.date_range(start_date, periods=df_full.shape[0])
df_full["state"] = [state] * df_full.shape[0]
df_full["type"] = ["R_L"] * df_full.shape[0]
# reset indices
df_full.reset_index(drop=True, inplace=True)
# merge df back with the other variables
df_forecast2_new = pd.concat([df_forecast2_new, df_full], axis=0)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_forecast2_new.loc[
(df_forecast2_new.state == state) & (df_forecast2_new.type == "R_L")
]
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date]
plot_df_forecast2 = plot_df.loc[plot_df["date"] > data_date]
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast2.date, plot_df_forecast2["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast2.date,
plot_df_forecast2["lower"],
plot_df_forecast2["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast2.date,
plot_df_forecast2["bottom"],
plot_df_forecast2["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
            pd.to_datetime(today),
        )
# coding: utf-8
# In[1]:
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import interp
from itertools import cycle
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve,auc
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, StratifiedKFold
warnings.filterwarnings('ignore')
# In[2]:
def algorithm(algoname,colors,train,test,pos):
mean_tpr,lw,i =0.0, 2,1
mean_fpr = np.linspace(0, 1, 100)
fold_accuracy= []
skfold = StratifiedKFold(n_splits=10,shuffle = True)
for (trainindex,testindex), color in zip(skfold.split(train, test.values.ravel()), colors):
X_train, X_test = train.loc[trainindex], train.loc[testindex]
y_train, y_test = test.loc[trainindex], test.loc[testindex]
model = algoname.fit(X_train,y_train.values.ravel())
fold_accuracy.append(model.score(X_test,y_test.values.ravel()))
result = model.predict(X_test)
fpr, tpr, thresholds= roc_curve(y_test.values,result,pos_label=pos)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
#plt.step(fpr, tpr, lw=lw, color=color,label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i+=1
mean_tpr /= skfold.get_n_splits(train,test.values.ravel())
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.step(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.title("Average accuracy: {0:.3f}".format(np.asarray(fold_accuracy).mean()))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend(loc="lower right")
return ("Average accuracy: {0:.3f} (+/-{1:.3f})".format(np.asarray(fold_accuracy).mean(), np.asarray(fold_accuracy).std()))
# In[3]:
import math
import operator
def euclidean_distance(data1,data2):
result = 0.0
for val in range(len(data2)):
result += (data1[val]-data2[val])**2
return math.sqrt(result)
def knn(train,test,k):
dist,kneighbors = [],[]
for a,c in train.iterrows():
distance = euclidean_distance(c,test)
dist.append((c,distance))
dist.sort(key=operator.itemgetter(1))
for i in range(k):
kneighbors.append(dist[i][0])
return kneighbors
def majorityVote(kneighbors):
vote = {}
for i in range(len(kneighbors)):
lst = kneighbors[i][-1]
if lst in vote:
vote[lst]+=1
else:
vote[lst]=1
majority = max(vote.items(), key=operator.itemgetter(1))[0]
return majority
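# Illustrative (commented) use of the hand-rolled kNN helpers above; the toy frame and
# query point are assumptions for demonstration only, not part of the dataset used below.
# toy_train = pd.DataFrame([[1.0, 2.0, 'a'], [1.5, 1.8, 'a'], [5.0, 8.0, 'b']])
# nearest = knn(toy_train, [1.2, 1.9], k=2)  # rows sorted by Euclidean distance on cols 0-1
# majorityVote(nearest)                      # -> 'a'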
# In[4]:
estimators = [10,100,300,600,800,1000]
depth = [1,2,50,100,300,800,None]
features = ['auto','sqrt',0.2, None]
min_sampleleaf = [1,5,10,50,100,200,500]
randomstate = [1,50,100,500,None]
colors = cycle(['brown','lightcoral','red','magenta','cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
penalties = ['l1','l2']
cvalue = [1.0,0.1,0.5,0.8,0.9]
solve = ['newton-cg', 'lbfgs', 'liblinear', 'sag']
tolerance = []
classweight = ['balanced',None]
max_iter = [10,100,500,1000]
randomState = [None,10,100,500,1000,1024]
neighbors = [5,10,50,100]
weight = ['uniform','distance']
algo = ['auto', 'ball_tree', 'kd_tree', 'brute']
dual = [True,False]
# In[5]:
crx = pd.read_csv('crx.data', header=None, sep=',')
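# Illustrative (commented) call of algorithm() from In[2]; treating column 15 of crx.data
# as the '+'/'-' class label is an assumption for demonstration, and the raw frame would
# still need missing-value and encoding clean-up before fitting.
# X_features = crx.drop(columns=[15])
# y_labels = crx[[15]]
# print(algorithm(LogisticRegression(), colors, X_features, y_labels, pos='+'))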
import os
import re
import numpy as np
import pandas as pd
import nltk
from nltk import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from argparse import ArgumentParser
import json
from collections import Counter
from nltk.corpus import stopwords
import spacy
nlp = spacy.load("en_core_web_sm")
# paths
model_path = "../../models/Stanford-OpenIE-Python/"
# functions
def get_root_verb(text):
doc = nlp(text)
if len(doc) > 0:
for token in doc:
if token.dep_ == "ROOT" and token.head.pos_ == "VERB":
return str(token)
else:
return ""
else:
return ""
def extract_compounds(text):
"""Extract compound noun phrases with beginning and end idxs.
Keyword arguments:
text -- the actual text source from which to extract entities
"""
comp_idx = 0
compound = []
compound_nps = []
tok_idx = 0
for idx, tok in enumerate(nlp(text)):
if tok.dep_ == "compound":
# capture hyphenated compounds
children = "".join([c.text for c in tok.children])
if "-" in children:
compound.append("".join([children, tok.text]))
else:
compound.append(tok.text)
# remember starting index of first child in compound or word
try:
tok_idx = [c for c in tok.children][0].idx
except IndexError:
if len(compound) == 1:
tok_idx = tok.idx
comp_idx = tok.i
# append the last word in a compound phrase
if tok.i - comp_idx == 1:
compound.append(tok.text)
if len(compound) > 1:
compound = " ".join(compound)
compound_nps.append(
(compound, tok_idx, tok_idx + len(compound), "COMPOUND")
)
# reset parameters
tok_idx = 0
compound = []
if len(compound_nps) != 0:
return compound_nps[0][0]
else:
return ""
def fix_entities(text):
if "clinton" in text:
return "<NAME>"
elif "donald" in text:
return "<NAME>"
elif "hillary" in text:
return "<NAME>"
elif "trump" in text:
return "<NAME>"
else:
return text
def shorten_relations(relation, n=3):
if len(word_tokenize(relation)) > n:
return ""
else:
return relation
def remove_stops(word):
if word.lower() in set(stopwords.words("english")):
return ""
else:
return word
def extract_entities_spacy(text):
proc = nlp(text)
if len(proc.ents) == 0:
return ""
else:
return " ".join([x.text for x in proc.ents])
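# Illustrative (commented) behaviour of the helpers above; entity output depends on the
# loaded spaCy model, so the first result is only indicative.
# extract_entities_spacy("Donald Trump visited Berlin")  # e.g. "Donald Trump Berlin"
# shorten_relations("was elected president of", n=3)     # -> "" (longer than 3 tokens)
# remove_stops("The")                                     # -> ""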
def pp_pipeline(triples_path, output_dir, model_name):
"""Perform pre-processing pipeline"""
filenames = [
triples_path + i
for i in sorted(os.listdir(triples_path))
if ".txt" in i and "_triple_" in i
]
swedish_ids = []
doc_ids = []
with open(output_dir + "{:s}.txt".format(model_name), "w") as outfile:
for j in range(len(filenames)):
with open(filenames[j]) as infile:
m = re.search("\d", filenames[j])
if filenames[j][m.start()] == "0" and "swedish" not in filenames[j]:
for line in infile:
outfile.write(
"en" + "|" + "true" + "|" + str(j) + "|" + line + "\n"
)
elif filenames[j][m.start()] != "0" and "swedish" not in filenames[j]:
for line in infile:
outfile.write(
"en" + "|" + "fake" + "|" + str(j) + "|" + line + "\n"
)
elif "swedish" in filenames[j]:
doc_ids.append(("".join(filter(str.isdigit, filenames[j])))[1:])
swedish_ids.append(j)
for line in infile:
outfile.write(
"sv" + "|" + "fake" + "|" + str(j) + "|" + line + "\n"
)
np.savetxt(
"../../data/external/swedish_news/ids.txt", np.array(swedish_ids), fmt="%s"
)
np.savetxt(
"../../data/external/swedish_news/doc_ids.txt", np.array(doc_ids), fmt="%s"
)
df_triples = pd.read_csv(
output_dir + "{:s}.txt".format(model_name), header=None, sep="\t"
)
df_triples = pd.DataFrame(
df_triples[0].str.split("|", 5).tolist(),
columns=["lang", "status", "article_id", "e1", "r", "e2"],
)
lemmatizer = WordNetLemmatizer()
df_triples["l1"] = (
df_triples["e1"]
.apply(lambda x: extract_entities_spacy(x))
.apply(lambda x: x.lower().strip())
.apply(lambda x: fix_entities(x))
.apply(remove_stops)
.apply(lambda x: re.sub("[^\s'_A-Za-z]", "", x))
.apply(lambda x: x.lstrip().rstrip())
)
df_triples["l2"] = (
df_triples["e2"]
.apply(lambda x: extract_entities_spacy(x))
.apply(lambda x: x.lower().strip())
.apply(lambda x: fix_entities(x))
.apply(remove_stops)
.apply(lambda x: re.sub("[^\s'_A-Za-z]", "", x))
.apply(lambda x: x.lstrip().rstrip())
)
df_triples["rel"] = (
df_triples["r"]
.apply(lambda x: x.lower().strip())
.apply(lambda x: lemmatizer.lemmatize(x, pos="v"))
.apply(lambda x: re.sub("[^\s'_A-Za-z]", "", x))
.apply(lambda x: x.lstrip().rstrip())
)
    total_entities = pd.concat([df_triples["l1"], df_triples["l2"]])
import abc
import logging
import math
import os
import time
import numpy as np
import pandas as pd
from PyDSS.common import PV_LOAD_SHAPE_FILENAME
from PyDSS.reports.reports import ReportBase, ReportGranularity
from PyDSS.utils.dataframe_utils import read_dataframe, write_dataframe
from PyDSS.utils.utils import dump_data
PF1_SCENARIO = "pf1"
CONTROL_MODE_SCENARIO = "control_mode"
logger = logging.getLogger(__name__)
class PvReportBase(ReportBase, abc.ABC):
"""Base class for PV reports"""
def __init__(self, name, results, simulation_config):
super().__init__(name, results, simulation_config)
assert len(results.scenarios) == 2
self._control_mode_scenario = results.scenarios[0]
assert self._control_mode_scenario.name == "control_mode"
self._pf1_scenario = results.scenarios[1]
cm_profiles = self._control_mode_scenario.read_pv_profiles()
if not cm_profiles:
self._pv_system_names = []
return
self._pv_system_names = [x["name"] for x in cm_profiles["pv_systems"]]
self._pf1_pv_systems = {
x["name"]: x for x in self._pf1_scenario.read_pv_profiles()["pv_systems"]
}
self._control_mode_pv_systems = {
x["name"]: x for x in cm_profiles["pv_systems"]
}
def _get_pv_system_info(self, pv_system, scenario):
if scenario == PF1_SCENARIO:
pv_systems = self._pf1_pv_systems
else:
pv_systems = self._control_mode_pv_systems
return pv_systems[pv_system]
def _has_pv_systems(self):
return len(self._pv_system_names) > 0
@staticmethod
def get_required_exports(settings):
granularity = ReportGranularity(settings.reports.granularity)
_type, sum_elements = ReportBase._params_from_granularity(granularity)
return {
"PVSystems": [
{
"property": "Powers",
"store_values_type": _type,
"sum_elements": sum_elements,
"data_conversion": "sum_abs_real",
},
],
}
@staticmethod
def get_required_scenario_names():
return set(["pf1", "control_mode"])
@staticmethod
def set_required_project_settings(settings):
if not settings.exports.export_pv_profiles:
settings.exports.export_pv_profiles = True
logger.info("Enabled Export PV Profiles")
class PvClippingReport(PvReportBase):
"""Reports PV Clipping for the simulation.
The report generates a pv_clipping output file. The file extension depends
on the input parameters. If the data was collected at every time point then
the output file will be .csv or .h5, depending on 'Export Format.'
Otherwise, the output file will be .json.
TODO: This is an experimental report. Outputs have not been validated.
"""
PER_TIME_POINT_FILENAME = "pv_clipping.h5"
TOTAL_FILENAME = "pv_clipping.json"
NAME = "PV Clipping"
def __init__(self, name, results, simulation_config):
super().__init__(name, results, simulation_config)
if not self._has_pv_systems():
return
diff_tolerance = self._report_settings.diff_tolerance_percent_pmpp * .01
denominator_tolerance = self._report_settings.denominator_tolerance_percent_pmpp * .01
logger.debug("tolerances: diff=%s denominator=%s", diff_tolerance, denominator_tolerance)
self._diff_tolerances = {}
self._denominator_tolerances = {}
for pv_system in self._pf1_scenario.read_pv_profiles()["pv_systems"]:
self._diff_tolerances[pv_system["name"]] = pv_system["pmpp"] * diff_tolerance
self._denominator_tolerances[pv_system["name"]] = pv_system["pmpp"] * denominator_tolerance
@staticmethod
def _calculate_clipping(total_dc_power, pf1_real_power):
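        # clipping (%) = (available DC power - pf1 AC real power) / pf1 AC real power * 100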
return (total_dc_power - pf1_real_power) * 100 / pf1_real_power
@staticmethod
def _calculate_clipping_array(dc_power, pf1_real_power):
dcp = dc_power.values
rp = pf1_real_power.values
return (dcp - rp) / rp * 100
def _get_total_dc_power_across_pv_systems(self):
total_dc_power = 0.0
for name in self._pv_system_names:
total_dc_power += self._get_total_dc_power(name)
return total_dc_power
def _get_total_dc_power(self, pv_system):
cm_info = self._get_pv_system_info(pv_system, CONTROL_MODE_SCENARIO)
pmpp = cm_info["pmpp"]
irradiance = cm_info["irradiance"]
total_irradiance = cm_info["load_shape_pmult_sum"]
return pmpp * irradiance * total_irradiance
def _generate_per_pv_system_per_time_point(self, output_dir):
pv_load_shapes = self._read_pv_load_shapes()
pf1_real_power_full = self._pf1_scenario.get_full_dataframe(
"PVSystems", "Powers"
)
name = None
# TODO: Apply tolerances to other granularity options.
def calc_clipping(dcp, rp):
if dcp < self._denominator_tolerances[name]:
return 0
diff = dcp - rp
if diff < 0 and abs(diff) < self._diff_tolerances[name]:
return 0
return (dcp - rp) / rp * 100
data = {}
for _name in self._pv_system_names:
name = _name
cm_info = self._get_pv_system_info(name, CONTROL_MODE_SCENARIO)
pf1_real_power = pf1_real_power_full[name + "__Powers"]
dc_power = pv_load_shapes[cm_info["load_shape_profile"]] * \
cm_info["pmpp"] * \
cm_info["irradiance"]
assert len(dc_power) == len(pf1_real_power), \
f"{len(dc_power)} {len(pf1_real_power)}"
col = name + "__Clipping"
data[col] = dc_power.combine(pf1_real_power, calc_clipping).values
df = pd.DataFrame(data, index=pf1_real_power_full.index)
self._export_dataframe_report(df, output_dir, "pv_clipping")
def _generate_per_pv_system_total(self, output_dir):
data = {"pv_systems": []}
for name in self._pv_system_names:
pf1_real_power = self._pf1_scenario.get_element_property_value(
"PVSystems", "PowersSum", name
)
dc_power = self._get_total_dc_power(name)
clipping = self._calculate_clipping(dc_power, pf1_real_power)
data["pv_systems"].append(
{
"name": name,
"clipping": clipping,
}
)
self._export_json_report(data, output_dir, self.TOTAL_FILENAME)
def _generate_all_pv_systems_per_time_point(self, output_dir):
pf1_real_power = self._pf1_scenario.get_summed_element_dataframe(
"PVSystems", "Powers"
)
pv_load_shapes = self._read_pv_load_shapes()
dc_powers = {}
for name in self._pv_system_names:
cm_info = self._get_pv_system_info(name, CONTROL_MODE_SCENARIO)
series = pv_load_shapes[cm_info["load_shape_profile"]]
dc_power = series * cm_info["pmpp"] * cm_info["irradiance"]
assert len(dc_power) == len(pf1_real_power)
dc_powers[name] = dc_power.values
# TODO: just for validation
assert math.isclose(sum(dc_power.values), cm_info["load_shape_pmult_sum"] * cm_info["pmpp"] * cm_info["irradiance"])
df = pd.DataFrame(dc_powers, index=pf1_real_power.index)
total_dc_power = df.sum(axis=1)
clipping = pd.DataFrame(
self._calculate_clipping_array(total_dc_power, pf1_real_power.iloc[:, 0]),
index=pf1_real_power.index,
columns=["TotalClipping"],
)
self._export_dataframe_report(clipping, output_dir, "pv_clipping")
def _generate_all_pv_systems_total(self, output_dir):
total_dc_power = self._get_total_dc_power_across_pv_systems()
pf1_real_power = next(iter(
self._pf1_scenario.get_summed_element_total("PVSystems", "PowersSum").values()
))
clipping = self._calculate_clipping(total_dc_power, pf1_real_power)
data = {"clipping": clipping}
self._export_json_report(data, output_dir, self.TOTAL_FILENAME)
def _read_pv_load_shapes(self):
path = os.path.join(
str(self._settings.project.active_project_path),
"Exports",
CONTROL_MODE_SCENARIO,
PV_LOAD_SHAPE_FILENAME,
)
return read_dataframe(path)
def generate(self, output_dir):
if not self._has_pv_systems():
return
granularity = self._settings.reports.granularity
if granularity == ReportGranularity.PER_ELEMENT_PER_TIME_POINT:
self._generate_per_pv_system_per_time_point(output_dir)
elif granularity == ReportGranularity.PER_ELEMENT_TOTAL:
self._generate_per_pv_system_total(output_dir)
elif granularity == ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT:
self._generate_all_pv_systems_per_time_point(output_dir)
elif granularity == ReportGranularity.ALL_ELEMENTS_TOTAL:
self._generate_all_pv_systems_total(output_dir)
else:
assert False
class PvCurtailmentReport(PvReportBase):
"""Reports PV Curtailment at every time point in the simulation.
The report generates a pv_curtailment output file. The file extension
depends on the input parameters. If the data was collected at every time
point then the output file will be .csv or .h5, depending on 'Export
Format.' Otherwise, the output file will be .json.
TODO: This is an experimental report. Outputs have not been validated.
"""
PER_TIME_POINT_FILENAME = "pv_curtailment.h5"
TOTAL_FILENAME = "pv_curtailment.json"
NAME = "PV Curtailment"
def __init__(self, name, results, simulation_config):
super().__init__(name, results, simulation_config)
if not self._has_pv_systems():
return
diff_tolerance = self._report_settings.diff_tolerance_percent_pmpp * .01
denominator_tolerance = self._report_settings.denominator_tolerance_percent_pmpp * .01
logger.debug("tolerances: diff=%s denominator=%s", diff_tolerance, denominator_tolerance)
self._diff_tolerances = {}
self._denominator_tolerances = {}
for pv_system in self._pf1_scenario.read_pv_profiles()["pv_systems"]:
self._diff_tolerances[pv_system["name"]] = pv_system["pmpp"] * diff_tolerance
self._denominator_tolerances[pv_system["name"]] = pv_system["pmpp"] * denominator_tolerance
def _generate_per_pv_system_per_time_point(self, output_dir):
pf1_power = self._pf1_scenario.get_full_dataframe("PVSystems", "Powers")
control_mode_power = self._control_mode_scenario.get_full_dataframe(
"PVSystems", "Powers"
)
name = None
def calc_curtailment(pf1, cm):
if pf1 < self._denominator_tolerances[name]:
return 0
diff = pf1 - cm
if diff < 0 and abs(diff) < self._diff_tolerances[name]:
return 0
return (pf1 - cm) / pf1 * 100
data = {}
for col in pf1_power.columns:
name = col.split("__")[0]
s_pf1 = pf1_power[col]
s_cm = control_mode_power[col]
new_name = col.replace("Powers", "Curtailment")
data[new_name] = s_pf1.combine(s_cm, calc_curtailment).values
        df = pd.DataFrame(data, index=pf1_power.index)
import yfinance as yf
import ta
import pandas as pd
from datetime import date, timedelta, datetime
from IPython.display import clear_output
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
ticker = 'FSLY'
start_date = '2019-10-23'
end_date = '2020-10-23'
def get_stock_backtest_data(ticker, start, end):
date_fmt = '%Y-%m-%d'
    # keep a one-year buffer before the requested start so indicators have warm-up history
    start_date_buffer = datetime.strptime(start, date_fmt) - timedelta(days=365)
    start_date_buffer = start_date_buffer.strftime(date_fmt)
    df = yf.download(ticker, start=start_date_buffer, end=end)
return df
df = get_stock_backtest_data(ticker, start_date, end_date)
df['CLOSE_PREV'] = df.Close.shift(1)
k_band = ta.volatility.KeltnerChannel(df.High, df.Low, df.Close, 10)
df['K_BAND_UB'] = k_band.keltner_channel_hband().round(4)
df['K_BAND_LB'] = k_band.keltner_channel_lband().round(4)
df[['K_BAND_UB', 'K_BAND_LB']].dropna().head()
df['LONG'] = (df.Close <= df.K_BAND_LB) & (df.CLOSE_PREV > df.K_BAND_LB)
df['EXIT_LONG'] = (df.Close >= df.K_BAND_UB) & (df.CLOSE_PREV < df.K_BAND_UB)
df['SHORT'] = (df.Close >= df.K_BAND_UB) & (df.CLOSE_PREV < df.K_BAND_UB)
df['EXIT_SHORT'] = (df.Close <= df.K_BAND_LB) & (df.CLOSE_PREV > df.K_BAND_LB)
df.LONG = df.LONG.shift(1)
df.EXIT_LONG = df.EXIT_LONG.shift(1)
df.SHORT = df.SHORT.shift(1)
df.EXIT_SHORT = df.EXIT_SHORT.shift(1)
print(df[['LONG', 'EXIT_LONG', 'SHORT', 'EXIT_SHORT']].dropna().head())
def strategy_KeltnerChannel_origin(df, **kwargs):
n = kwargs.get('n', 10)
data = df.copy()
k_band = ta.volatility.KeltnerChannel(data.High, data.Low, data.Close, n)
data['K_BAND_UB'] = k_band.keltner_channel_hband().round(4)
data['K_BAND_LB'] = k_band.keltner_channel_lband().round(4)
data['CLOSE_PREV'] = data.Close.shift(1)
data['LONG'] = (data.Close <= data.K_BAND_LB) & (data.CLOSE_PREV > data.K_BAND_LB)
data['EXIT_LONG'] = (data.Close >= data.K_BAND_UB) & (data.CLOSE_PREV < data.K_BAND_UB)
data['SHORT'] = (data.Close >= data.K_BAND_UB) & (data.CLOSE_PREV < data.K_BAND_UB)
data['EXIT_SHORT'] = (data.Close <= data.K_BAND_LB) & (data.CLOSE_PREV > data.K_BAND_LB)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
df = strategy_KeltnerChannel_origin(df, n=10)
def strategy_BollingerBands(df, **kwargs):
n = kwargs.get('n', 10)
n_rng = kwargs.get('n_rng', 2)
data = df.copy()
boll = ta.volatility.BollingerBands(data.Close, n, n_rng)
data['BOLL_LBAND_INDI'] = boll.bollinger_lband_indicator()
data['BOLL_UBAND_INDI'] = boll.bollinger_hband_indicator()
data['CLOSE_PREV'] = data.Close.shift(1)
data['LONG'] = data.BOLL_LBAND_INDI == 1
data['EXIT_LONG'] = data.BOLL_UBAND_INDI == 1
data['SHORT'] = data.BOLL_UBAND_INDI == 1
data['EXIT_SHORT'] = data.BOLL_LBAND_INDI == 1
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_BollingerBands(df, n=10, n_rng=2)
def strategy_MA(df, **kwargs):
n = kwargs.get('n', 50)
ma_type = kwargs.get('ma_type', 'sma')
ma_type = ma_type.strip().lower()
data = df.copy()
if ma_type == 'sma':
sma = ta.trend.SMAIndicator(data.Close, n)
data['MA'] = sma.sma_indicator().round(4)
elif ma_type == 'ema':
ema = ta.trend.EMAIndicator(data.Close, n)
data['MA'] = ema.ema_indicator().round(4)
data['CLOSE_PREV'] = data.Close.shift(1)
data['LONG'] = (data.Close > data.MA) & (data.CLOSE_PREV <= data.MA)
data['EXIT_LONG'] = (data.Close < data.MA) & (data.CLOSE_PREV >= data.MA)
data['SHORT'] = (data.Close < data.MA) & (data.CLOSE_PREV >= data.MA)
data['EXIT_SHORT'] = (data.Close > data.MA) & (data.CLOSE_PREV <= data.MA)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_SMA(df, n=10, ma_type='ema')
def strategy_MACD(df, **kwargs):
n_slow = kwargs.get('n_slow', 26)
n_fast = kwargs.get('n_fast', 12)
n_sign = kwargs.get('n_sign', 9)
data = df.copy()
macd = ta.trend.MACD(data.Close, n_slow, n_fast, n_sign)
data['MACD_DIFF'] = macd.macd_diff().round(4)
data['MACD_DIFF_PREV'] = data.MACD_DIFF.shift(1)
data['LONG'] = (data.MACD_DIFF > 0) & (data.MACD_DIFF_PREV <= 0)
data['EXIT_LONG'] = (data.MACD_DIFF < 0) & (data.MACD_DIFF_PREV >= 0)
data['SHORT'] = (data.MACD_DIFF < 0) & (data.MACD_DIFF_PREV >= 0)
data['EXIT_SHORT'] = (data.MACD_DIFF > 0) & (data.MACD_DIFF_PREV <= 0)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_MACD(df, n_slow=26, n_fast=12, n_sign=9)
def strategy_RSI(df, **kwargs):
n = kwargs.get('n', 14)
data = df.copy()
rsi = ta.momentum.RSIIndicator(data.Close, n)
data['RSI'] = rsi.rsi().round(4)
data['RSI_PREV'] = data.RSI.shift(1)
data['LONG'] = (data.RSI > 30) & (data.RSI_PREV <= 30)
data['EXIT_LONG'] = (data.RSI < 70) & (data.RSI_PREV >= 70)
data['SHORT'] = (data.RSI < 70) & (data.RSI_PREV >= 70)
data['EXIT_SHORT'] = (data.RSI > 30) & (data.RSI_PREV <= 30)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_RSI(df, n_slow=26, n_fast=12, n_sign=9)
def strategy_WR(df, **kwargs):
n = kwargs.get('n', 14)
data = df.copy()
wr = ta.momentum.WilliamsRIndicator(data.High, data.Low, data.Close, n)
data['WR'] = wr.wr().round(4)
data['WR_PREV'] = data.WR.shift(1)
data['LONG'] = (data.WR > -80) & (data.WR_PREV <= -80)
data['EXIT_LONG'] = (data.WR < -20) & (data.WR_PREV >= -20)
data['SHORT'] = (data.WR < -20) & (data.WR_PREV >= -20)
data['EXIT_SHORT'] = (data.WR > -80) & (data.WR_PREV <= -80)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_WR(df, n_slow=26, n_fast=12, n_sign=9)
def strategy_Stochastic_fast(df, **kwargs):
k = kwargs.get('k', 20)
d = kwargs.get('d', 5)
data = df.copy()
sto = ta.momentum.StochasticOscillator(data.High, data.Low, data.Close, k, d)
data['K'] = sto.stoch().round(4)
data['D'] = sto.stoch_signal().round(4)
data['DIFF'] = data['K'] - data['D']
data['DIFF_PREV'] = data.DIFF.shift(1)
data['LONG'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data['EXIT_LONG'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['SHORT'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['EXIT_SHORT'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_Stochastic_fast(df, k=20, d=5)
def strategy_Stochastic_slow(df, **kwargs):
k = kwargs.get('k', 20)
d = kwargs.get('d', 5)
dd = kwargs.get('dd', 3)
data = df.copy()
sto = ta.momentum.StochasticOscillator(data.High, data.Low, data.Close, k, d)
data['K'] = sto.stoch().round(4)
data['D'] = sto.stoch_signal().round(4)
ma = ta.trend.SMAIndicator(data.D, dd)
data['DD'] = ma.sma_indicator().round(4)
data['DIFF'] = data['D'] - data['DD']
data['DIFF_PREV'] = data.DIFF.shift(1)
data['LONG'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data['EXIT_LONG'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['SHORT'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['EXIT_SHORT'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_Stochastic_slow(df, k=20, d=5, dd=3)
def strategy_Ichmoku(df, **kwargs):
n_conv = kwargs.get('n_conv', 9)
n_base = kwargs.get('n_base', 26)
n_span_b = kwargs.get('n_span_b', 26)
data = df.copy()
ichmoku = ta.trend.IchimokuIndicator(data.High, data.Low, n_conv, n_base, n_span_b)
data['BASE'] = ichmoku.ichimoku_base_line().round(4)
data['CONV'] = ichmoku.ichimoku_conversion_line().round(4)
data['DIFF'] = data['CONV'] - data['BASE']
data['DIFF_PREV'] = data.DIFF.shift(1)
data['LONG'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data['EXIT_LONG'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['SHORT'] = (data.DIFF < 0) & (data.DIFF_PREV >= 0)
data['EXIT_SHORT'] = (data.DIFF > 0) & (data.DIFF_PREV <= 0)
data.LONG = data.LONG.shift(1)
data.EXIT_LONG = data.EXIT_LONG.shift(1)
data.SHORT = data.SHORT.shift(1)
data.EXIT_SHORT = data.EXIT_SHORT.shift(1)
return data
# df = get_stock_backtest_data(ticker, start_date, end_date)
# strategy_Ichmoku(df, n_conv=9, n_base=26, n_span_b=26)
bt_df = df[(df.index >= start_date) & (df.index <= end_date)]
def prepare_stock_ta_backtest_data(df, start_date, end_date, strategy, **strategy_params):
df_strategy = strategy(df, **strategy_params)
bt_df = df_strategy[(df_strategy.index >= start_date) & (df_strategy.index <= end_date)]
return bt_df
bt_df = prepare_stock_ta_backtest_data(
df, start_date, end_date, strategy_KeltnerChannel_origin, n=10
)
bt_df.head()
balance = 1000000
pnl = 0
position = 0
last_signal = 'hold'
last_price = 0
c = 0
trade_date_start = []
trade_date_end = []
trade_days = []
trade_side = []
trade_pnl = []
trade_ret = []
cum_value = []
for index, row in bt_df.iterrows():
# check and close any positions
if row.EXIT_LONG and last_signal == 'long':
trade_date_end.append(row.name)
trade_days.append(c)
pnl = (row.Open - last_price) * position
trade_pnl.append(pnl)
trade_ret.append((row.Open / last_price - 1) * 100)
balance = balance + row.Open * position
position = 0
last_signal = 'hold'
c = 0
elif row.EXIT_SHORT and last_signal == 'short':
trade_date_end.append(row.name)
trade_days.append(c)
pnl = (row.Open - last_price) * position
trade_pnl.append(pnl)
trade_ret.append((last_price / row.Open - 1) * 100)
balance = balance + pnl
position = 0
last_signal = 'hold'
c = 0
# check signal and enter any possible position
if row.LONG and last_signal != 'long':
last_signal = 'long'
last_price = row.Open
trade_date_start.append(row.name)
trade_side.append('long')
position = int(balance / row.Open)
cost = position * row.Open
balance = balance - cost
c = 0
elif row.SHORT and last_signal != 'short':
last_signal = 'short'
last_price = row.Open
trade_date_start.append(row.name)
trade_side.append('short')
position = int(balance / row.Open) * -1
c = 0
# compute market value and count days for any possible poisition
if last_signal == 'hold':
market_value = balance
elif last_signal == 'long':
c = c + 1
market_value = position * row.Close + balance
else:
c = c + 1
market_value = (row.Close - last_price) * position + balance
cum_value.append(market_value)
cum_ret_df = pd.DataFrame(cum_value, index=bt_df.index, columns=['CUM_RET'])
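# Illustrative (commented) follow-up, not part of the original notebook: a couple of
# summary figures that could be derived from the trade log and market-value series above.
# win_rate = sum(p > 0 for p in trade_pnl) / len(trade_pnl) if trade_pnl else float('nan')
# final_market_value = cum_value[-1]
# print(win_rate, final_market_value)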
import os
import pandas as pd
import numpy as np
"""
Rewrite DataFrame Keys: ['open', 'close', 'high', 'low', 'volume', 'money'] + 6
[ma_1, ma_2, ......, ma_12] 12
[momentum_1, momentum_2, ......, momentum_12] 12
[obv_1, obv_2, ......, obv_12] 12
Total 42
"""
def data_rewrite(security):
load_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data')
load_file = os.path.join(load_path, security + '.h5')
if not os.path.exists(load_file):
raise ValueError('{},文件不存在'.format(load_file))
raw_data = pd.read_hdf(load_file)
indices = raw_data.index.tolist()
rows = len(indices)
store_indices = []
rewrite_data = np.zeros((rows, 42))
last_12_close = []
last_12_obv = []
cur_obv = 0.
invalid_rows = 0
cur_idx = 0
while cur_idx < rows:
if cur_idx % 10000 == 0:
print("\r {} / {}".format(cur_idx, rows), end=' ')
cur_row = raw_data.loc[indices[cur_idx]]
if pd.isnull(cur_row[0]):
invalid_rows += 1
cur_idx += 1
continue
if indices[cur_idx].date() != indices[cur_idx - 1].date():
if not pd.isnull(raw_data.loc[indices[cur_idx - 1]]['close']):
last_12_close.append(raw_data.loc[indices[cur_idx - 1]]['close'])
last_12_obv.append(cur_obv)
cur_obv = 0
while len(last_12_close) > 12:
del last_12_close[0]
del last_12_obv[0]
assert len(last_12_obv) <= 12
rewrite_idx = cur_idx - invalid_rows
cur_obv += cur_row['volume'] * np.sign(cur_row['close'] - cur_row['open'])
rewrite_data[rewrite_idx, 0] = cur_row['open']
rewrite_data[rewrite_idx, 1] = cur_row['close']
rewrite_data[rewrite_idx, 2] = cur_row['high']
rewrite_data[rewrite_idx, 3] = cur_row['low']
rewrite_data[rewrite_idx, 4] = cur_row['volume']
rewrite_data[rewrite_idx, 5] = cur_row['money']
offset = 6
# ma_t
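        # ma_k (k = i + 1) averages the current bar's close with the closes of the previous
        # k trading days; when fewer than k prior daily closes exist, the current close is
        # reused as padding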
for i in range(12):
sum_p = cur_row['close']
for j in range(1, i + 2):
if len(last_12_close) >= j:
sum_p += last_12_close[-j]
else:
sum_p += cur_row['close']
rewrite_data[rewrite_idx, offset] = round(sum_p / (i + 2), 2)
offset += 1
# momentum_t
for i in range(12):
if len(last_12_close) >= (i + 1):
momentum_i = cur_row['close'] - last_12_close[-i - 1]
else:
momentum_i = 0.
rewrite_data[rewrite_idx, offset] = momentum_i
offset += 1
# obv_t
for i in range(12):
sum_obv = cur_obv
for j in range(1, i + 2):
if len(last_12_obv) >= j:
sum_obv += last_12_obv[-j]
else:
sum_obv += cur_obv
rewrite_data[rewrite_idx, offset] = round(sum_obv / (i + 2), 2)
offset += 1
assert offset == 42
store_indices.append(indices[cur_idx])
cur_idx += 1
rewrite_data = rewrite_data[:len(store_indices)]
print(rewrite_data.shape)
store_columns = ['open', 'close', 'high', 'low', 'volume', 'money']
for i in range(1, 13):
store_columns += ['ma_%d' % i]
for i in range(1, 13):
store_columns += ['mom_%d' % i]
for i in range(1, 13):
store_columns += ['obv_%d' % i]
    df = pd.DataFrame(rewrite_data, index=store_indices, columns=store_columns)
try:
import spacy
from spacy.gold import offsets_from_biluo_tags as _offsets_from_biluo_tags
from spacy.gold import iob_to_biluo as _iob_to_biluo
import pandas as pd
HAS_SPACY = True
except:
HAS_SPACY = False
from pathlib import Path
import json,random,os,tempfile,logging
__all__=["_from_bio_tags","_from_json","ner_prepare_data","_create_zip"]
def _raise_spacy_import_error():
raise Exception('This module requires pandas and spacy version 2.1.8. Install it using \"pip install pandas spacy==2.1.8\"')
def _create_zip(zipname, path):
import shutil
if os.path.exists(os.path.join(path, zipname) + '.zip'):
os.remove(os.path.join(path, zipname) + '.zip')
temp_dir = tempfile.TemporaryDirectory().name
zip_file = shutil.make_archive(os.path.join(temp_dir, zipname), 'zip', path)
shutil.move(zip_file, path)
def _from_bio_tags(tokens_collection, tags_collection):
"""
Converts training data from ``BIO`` format to spacy offsets.
===================== ===========================================
**Argument** **Description**
--------------------- -------------------------------------------
tokens_collection Required [list]. List of token lists
Example: [[This,is,a,test],[This,is,a,test1]]
--------------------- -------------------------------------------
tags_collection Required [list]. List of tag lists
Example: [[B-tagname,O,O,O],[B-tagname,I-tagname,O,O]]
===================== ===========================================
"""
nlp=spacy.blank('en')
train_data = []
for tags, tokens in zip(tags_collection, tokens_collection):
try:
tags = _iob_to_biluo(tags)
doc = spacy.tokens.doc.Doc(
nlp.vocab, words = tokens, spaces = [True]*(len(tokens)-1)+[False])
# run the standard pipeline against it
for name, proc in nlp.pipeline:
doc = proc(doc)
text=' '.join(tokens)
tags = _offsets_from_biluo_tags(doc, tags)
train_data.append((text,{'entities':tags}))
except:
pass
return train_data
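# Illustrative (commented) call, using the shapes from the docstring; 'tagname' is just a
# placeholder label.
# _from_bio_tags([['This', 'is', 'a', 'test']], [['B-tagname', 'O', 'O', 'O']])
# -> [('This is a test', {'entities': [(0, 4, 'tagname')]})]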
def _from_json(path, text_key='text', offset_key='labels'):
"""
Converts training data from JSON format to spacy offsets.
===================== ===========================================
**Argument** **Description**
--------------------- -------------------------------------------
text_key Optional:str='text. Json key under which text is available
--------------------- -------------------------------------------
offset_key Optional:str='labels. Json key under which offsets are available
===================== ===========================================
json-schema:
----------
{"id": 1, "text": "EU rejects ...", "labels": [[0,2,"ORG"], [11,17, "MISC"], [34,41,"ORG"]]}
{"id": 2, "text": "<NAME>", "labels": [[0, 15, "PERSON"]]}
{"id": 3, "text": "<NAME>", "labels": [[10, 15, "PERSON"]]}
----------
returns: A json file that can be consumed by ner_databunch.
"""
train_data = []
with open(path,'r', encoding='UTF-8') as f:
data_list = f.readlines()
for i, item in enumerate(data_list):
try:
train_data.append((json.loads(item).get(text_key), {'entities':json.loads(item).get(offset_key)}))
except:
pass
return train_data
def ner_prepare_data(dataset_type, path, class_mapping=None, val_split_pct=0.1):
"""
Prepares a data object
===================== ===========================================
**Argument** **Description**
--------------------- -------------------------------------------
dataset_type Required string. ['ner_json', 'BIO', 'LBIOU']
--------------------- -------------------------------------------
address_tag Optional dict. Address field/tag name
in the training data.
val_split_pct Optional Float. Percentage of training data to keep
as validation. The default value is 0.1.
===================== ===========================================
returns: A list [text,{entities},text,{entities}] that can be ingested by ``EntityRecognizer``.
"""
import spacy
v_list=spacy.__version__.split('.')
version=sum([int(j)*10**(2*i) for i,j in enumerate(v_list[::-1])])
if version<20108: #checking spacy version
        return logging.error(f'Entity recognition model needs spacy version 2.1.8 or higher. Your current spacy version is {spacy.__version__}, please update using \'pip install spacy==2.1.8\'')
if not HAS_SPACY:
_raise_spacy_import_error()
path=Path(path)
if class_mapping:
address_tag=class_mapping.get('address_tag')
else:
address_tag='Address'
if dataset_type == 'ner_json':
train_data = _from_json(path=path)
path=path.parent
elif dataset_type == 'BIO':
tags_collection = []
tokens_collection = []
tags_df = pd.read_csv(path/'tags.csv')
tokens_df = pd.read_csv(path/'tokens.csv')
for i,tags in tags_df.iterrows():
tags_collection.append(list(tags.dropna()))
for i,tokens in tokens_df.iterrows():
tokens_collection.append(list(tokens.dropna()))
train_data = _from_bio_tags(tags_collection=tags_collection, tokens_collection=tokens_collection)
elif dataset_type == 'LBIOU':
tags_collection = []
tokens_collection = []
tags_df = pd.read_csv(path/'tags.csv')
tokens_df = pd.read_csv(path/'tokens.csv')
for i,tags in tags_df.iterrows():
tags_collection.append(list(tags.dropna()))
for i,tokens in tokens_df.iterrows():
tokens_collection.append(list(tokens.dropna()))
train_data = _offsets_from_biluo_tags(tags=tags_collection, tokens=tokens_collection)
# return train_data
data=DatabunchNER(train_data, val_split_pct=val_split_pct, address_tag=address_tag, test_ds=None)
data.path=path
return data
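# Illustrative (commented) usage of ner_prepare_data; the file name below is a
# placeholder, not something shipped with this module.
# data = ner_prepare_data(dataset_type='ner_json', path='train_records.json',
#                         class_mapping={'address_tag': 'Address'}, val_split_pct=0.1)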
class _NERItemlist():
"""
Creates a dataset to store data within ``ner_databunch`` object.
===================== ===========================================
**Argument** **Description**
--------------------- -------------------------------------------
bs Batch size.
--------------------- -------------------------------------------
data Required:DatabunchNER.
===================== ===========================================
:returns: dataset.
"""
def __init__(self, bs, data):
self.bs = bs
self.entities = ['TEXT']+list({i[2] for i in pd.concat([pd.Series(i['entities']) for i in [o[1] for o in data]])}) ##Extracting all the unique entity names from input json
self.data = data
self.x = [o[0] for o in data]
self.y = [o[1] for o in data]
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def _random_batch(self, data):
res = []
for j in range(self.bs):
res.append(random.choice(data))
return res
def _entities_to_dataframe(self, item):
"""
This function is used to create pandas dataframe from training input data json.
"""
text = item[0]
df = pd.DataFrame(item[1].get('entities'))
out_dict = {}
for x in df[2].unique(): out_dict[x] = df[df[2] == x][[0, 1]].values.tolist()
out = {}
out['text'] = text
for key in out_dict.keys():
for tpl in out_dict.get(key):
if out.get(key) == None:
out[key] = []
out[key].append(text[tpl[0]:tpl[1]])
        return pd.Series(out)
import pandas as pd
import numpy as np
def merge_all(Curr,Bonds,OilN,NetSp,FundsRates, Jobs, pred_days=100):
Curr.columns=Curr.columns.get_level_values(0)
OilN.columns=OilN.columns.get_level_values(0)
Feedt=pd.merge(Bonds,OilN,how='outer',left_index=True,right_index=True)
Feedt=pd.merge(Feedt,FundsRates,how='outer',left_index=True,right_index=True)
Feedt=pd.merge(Feedt,Jobs,how='outer',left_index=True,right_index=True)
Feedt['NetSp']=NetSp
Feedt.fillna(method='pad',inplace=True)
Feedt=pd.merge(Curr,Feedt,how='outer',left_index=True,right_index=True)
Feed=Feedt.copy()
Feed.dropna(inplace=True) #this will drop all current prices too if the data has not been updated
Y=Feed['Result']
Ymag=Feed['Gain']
Feed.drop(['Result','Gain'],axis=1, inplace=True)
Feed=Feed.T
Feedt.drop(['Result','Gain'],axis=1, inplace=True)
Feedt.dropna(axis=0,inplace=True)
Feedt=Feedt.T
Y=Y.values.reshape(1,Feed.shape[1])
Ymag=Ymag.values.reshape(1,Feed.shape[1])
return Feed, Y, Ymag, Feedt.iloc[:,-pred_days:] #Defaulting using last 100 full examples as predictor input, can be adjusted later
def SplitData3way(X,Y,percent_train=90):
#First cut out the recent test and dev sets: all values in training set are prior to values in the test and dev
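    # (X is laid out as (n_features, m_examples) and Y as (1, m_examples); the earliest
    # percent_train% of columns become the shuffled training set, and the remaining recent
    # columns are split evenly into test and dev sets)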
ntrain=int(percent_train/100*X.shape[1])
Permuttrain=list(np.random.permutation(ntrain))
Xtrain=np.float32(X[:,Permuttrain])
Ytrain=np.float32(Y[:,Permuttrain])
m=X.shape[1]
# X=(X-X.mean(axis=1).reshape(-1,1))/X.std(axis=1).reshape(-1,1) ###### Batch normalization did not seem to have any effect
Permuttestdev= list(np.random.permutation(m-ntrain)+ntrain)
# print(Permuttestdev)
ntest=len(Permuttestdev)//2
# print(ntest)
Permuttest=Permuttestdev[:ntest]
Permutdev=Permuttestdev[ntest:]
Xtest=np.float32(X[:,Permuttest])
Ytest=np.float32(Y[:,Permuttest])
Xdev=np.float32(X[:,Permutdev])
Ydev=np.float32(Y[:,Permutdev])
# print(Xtrain, Ytrain, Xtest, Ytest, Xdev)
return Xtrain, Ytrain, Xtest, Ytest, Xdev, Ydev
def merge_all_DB(Curr,Bonds,OilN,NetSp,FundsRates, Jobs, pred_days=100):
Curr.columns=Curr.columns.get_level_values(0)
OilN.columns=OilN.columns.get_level_values(0)
Feedt=pd.merge(Bonds,OilN,how='outer',left_index=True,right_index=True)
    Feedt = pd.merge(Feedt, FundsRates, how='outer', left_index=True, right_index=True)
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
ibeis.ensure_pz_mtest()
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#tvec_list = np.vstack(tvecs_list)
#print(idx2_vec)
#labels, words = vtool.clustering.cached_akmeans(tvec_list, 1000, 30, cache_dir='.')
#tvecdf_list = [pd.DataFrame(vecs) for vecs in tvecs_list]
#tvecs_df = pd.DataFrame(tvecdf_list, index=taids)
#kpts_col = pd.DataFrame(tkpts_list, index=taids, columns=['kpts'])
#vecs_col = pd.DataFrame(tvecs_list, index=taids, columns=['vecs'])
#tvecs_dflist = [pd.DataFrame(vecs, index=np.arange(len(vecs))) for vecs in tvecs_list]
#pd.concat(tvecs_dflist)
## Bui
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#orig_idx2_vec, orig_idx2_ax, orig_idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#annots_df = pd.concat([vecs_col, kpts_col], axis=1)
#annots_df
#idx2_vec = np.vstack(annots_df['vecs'].values)
##idx2_ax =
#idx2_vec, idx2_ax, idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#labels, words = vtool.clustering2.cached_akmeans(tvec_list, 1000, 30)
#words = centroids
def display_info(ibs, invindex, annots_df):
#################
#from ibeis.other import dbinfo
#print(ibs.get_infostr())
#dbinfo.get_dbinfo(ibs, verbose=True)
#################
#print('Inverted Index Stats: vectors per word')
#print(utool.get_stats_str(map(len, invindex.wx2_idxs.values())))
#################
qfx2_vec = annots_df['vecs'][1]
centroids = invindex.words
num_pca_dims = 3 # 3
whiten = False
kwd = dict(num_pca_dims=num_pca_dims,
whiten=whiten,)
#clustertool.rrr()
def makeplot_(fnum, prefix, data, labels='centroids', centroids=centroids):
return clustertool.plot_centroids(data, centroids, labels=labels,
fnum=fnum, prefix=prefix + '\n', **kwd)
#makeplot_(1, 'centroid vecs', centroids)
#makeplot_(2, 'database vecs', invindex.idx2_vec)
#makeplot_(3, 'query vecs', qfx2_vec)
#makeplot_(4, 'database vecs', invindex.idx2_vec)
#makeplot_(5, 'query vecs', qfx2_vec)
#################
def make_annot_df(ibs):
aid_list = ibs.get_valid_aids()
_kpts_col = pd.DataFrame(ibs.get_annot_kpts(aid_list),
index=aid_list, columns=['kpts'])
_vecs_col = pd.DataFrame(ibs.get_annot_vecs(aid_list),
index=aid_list, columns=['vecs'])
annots_df = pd.concat([_vecs_col, _kpts_col], axis=1)
return annots_df
def learn_visual_words(annots_df, train_aids, nCentroids):
vecs_list = annots_df['vecs'][train_aids].as_matrix()
train_vecs = np.vstack(vecs_list)
print('Training %d word vocabulary with %d annots and %d descriptors' %
(nCentroids, len(train_aids), len(train_vecs)))
words = clustertool.cached_akmeans(train_vecs, nCentroids, max_iters=100)
return words
def index_data_annots(annots_df, daids, words):
vecs_list = annots_df['vecs'][daids]
flann_params = {}
wordflann = vtool.nearest_neighbors.flann_cache(words, flann_params=flann_params)
ax2_aid = np.array(daids)
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, np.arange(len(ax2_aid)))
invindex = InvertedIndex(words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid)
invindex.compute_internals()
return invindex
@six.add_metaclass(utool.ReloadingMetaclass)
class InvertedIndex(object):
def __init__(invindex, words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid):
invindex.wordflann = wordflann
invindex.words = words # visual word centroids
invindex.ax2_aid = ax2_aid # annot index -> annot id
invindex.idx2_vec = idx2_vec # stacked index -> descriptor vector
invindex.idx2_ax = idx2_ax # stacked index -> annot index
invindex.idx2_fx = idx2_fx # stacked index -> feature index
invindex.idx2_wx = None # stacked index -> word index
invindex.wx2_idxs = None # word index -> stacked indexes
invindex.wx2_drvecs = None # word index -> residual vectors
#invindex.compute_internals()
def compute_internals(invindex):
idx2_vec = invindex.idx2_vec
wx2_idxs, idx2_wx = invindex.assign_to_words(idx2_vec)
wx2_drvecs = invindex.compute_residuals(idx2_vec, wx2_idxs)
invindex.idx2_wx = idx2_wx
invindex.wx2_idxs = wx2_idxs
invindex.wx2_drvecs = wx2_drvecs
def assign_to_words(invindex, idx2_vec):
idx2_wx, _idx2_wdist = invindex.wordflann.nn_index(idx2_vec, 1)
if True:
assign_df = pd.DataFrame(idx2_wx, columns=['wordindex'])
grouping = assign_df.groupby('wordindex')
wx2_idxs = grouping.wordindex.indices
else:
# TODO: replace with pandas groupby
idx_list = list(range(len(idx2_wx)))
wx2_idxs = utool.group_items(idx_list, idx2_wx.tolist())
return wx2_idxs, idx2_wx
def compute_residuals(invindex, idx2_vec, wx2_idxs):
""" returns mapping from word index to a set of residual vectors """
words = invindex.words
wx2_rvecs = {}
for word_index in wx2_idxs.keys():
# for each word
idxs = wx2_idxs[word_index]
vecs = np.array(idx2_vec[idxs], dtype=np.float64)
word = np.array(words[word_index], dtype=np.float64)
# compute residuals of all vecs assigned to this word
residuals = np.array([word - vec for vec in vecs])
# normalize residuals
residuals_n = vtool.linalg.normalize_rows(residuals)
wx2_rvecs[word_index] = residuals_n
        return wx2_rvecs
#def smk_similarity(wx2_qrvecs, wx2_drvecs):
# similarity_matrix = (rvecs1.dot(rvecs2.T))
def query_inverted_index(annots_df, qaid, invindex):
qfx2_vec = annots_df['vecs'][qaid]
wx2_qfxs, qfx2_wx = invindex.assign_to_words(qfx2_vec)
wx2_qrvecs = invindex.compute_residuals(qfx2_vec, wx2_qfxs)
daid = invindex.ax2_aid[0]
def single_daid_similairty(invindex, daid):
""" daid = 4
FIXME: Inefficient code
"""
ax = np.where(invindex.ax2_aid == daid)[0]
wx2_dfxs = {}
wx2_drvecs = {}
for wx, idxs in invindex.wx2_idxs.items():
valid = (invindex.idx2_ax[idxs] == ax)
dfxs = invindex.idx2_fx[idxs][valid]
drvecs = invindex.wx2_drvecs[wx][valid]
wx2_dfxs[wx] = dfxs
wx2_drvecs[wx] = drvecs
# Similarity to a single database annotation
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(wx2_drvecs.keys())
total_score = 0
for wx in data_wxs.intersection(query_wxs):
qrvecs = wx2_qrvecs[wx]
drvecs = wx2_drvecs[wx]
residual_similarity = qrvecs.dot(drvecs.T)
scores = selectivity_function(residual_similarity)
total_score += scores.sum()
return total_score
def selectivity_function(residual_similarity, alpha=3, thresh=0):
""" sigma from SMK paper """
u = residual_similarity
scores = (np.sign(u) * np.abs(u)) ** alpha
scores[scores <= thresh] = 0
return scores
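    # For reference: with the default alpha=3 and thresh=0 this is u -> u**3 with
    # non-positive results zeroed, e.g. [0.5, -0.5, 0.05] -> [0.125, 0.0, 0.000125]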
# Entire database
daid2_score = utool.ddict(lambda: 0)
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(invindex.wx2_drvecs.keys())
qfx2_axs = []
qfx2_fm = []
qfx2_fs = []
aid_fm = []
aid_fs = []
idx2_daid = pd.Series(invindex.ax2_aid[invindex.idx2_ax], name='daid')
idx2_dfx = pd.Series(invindex.idx2_fx, name='dfx')
idx2_wfx = pd.Series(invindex.idx2_wx, name='dwx')
    idx_df = pd.concat((idx2_daid, idx2_dfx, idx2_wfx), axis=1, names=['idx'])
invindex.idx_df = idx_df
for wx in data_wxs.intersection(query_wxs):
qrvecs = wx2_qrvecs[wx]
drvecs = invindex.wx2_drvecs[wx]
residual_similarity = qrvecs.dot(drvecs.T)
# all pairs of scores
_idxs = pd.Series(invindex.wx2_idxs[wx], name='idx')
        qfxs = pd.Series(wx2_qfxs[wx], name='qfx')
dfxs = invindex.idx2_fx[_idxs.values]
score_matrix = selectivity_function(residual_similarity)
score_df = pd.DataFrame(score_matrix, index=qfxs, columns=_idxs,)
dax_score_grp = score_df.groupby(invindex.idx_df['daid'], axis=1)
score_qfx_v_daid = dax_score_grp.sum()
dax_score_grp = score_df.groupby(idx2_daid, axis=1)
score_qfx_v_daid = dax_score_grp.sum()
daid2_wordscore = score_qfx_v_daid.sum(axis=0)
for aid in daid2_wordscore.index:
daid2_score[aid] = daid2_wordscore[aid]
#score_mi = pd.MultiIndex.from_product((qfxs, _idxs), names=('qfxs', '_idxs'))
#print()
#score_df = pd.DataFrame(score_matrix, index=score_mi)
# Scores for each database vector
#scores = pd.DataFrame(score_matrix.sum(axis=0), columns=['score'])
# Use cartesian product of these indexes to produce feature matches
#qfxs = pd.DataFrame(wx2_qfxs[wx], columns=['qfx'])
#dfxs = pd.DataFrame(invindex.idx2_fx[_idxs], columns=['dfx'])
#daxs = pd.DataFrame(invindex.idx2_ax[_idxs], columns=['dax'])
#daids = pd.DataFrame(invindex.ax2_aid[invindex.idx2_ax[_idxs]], columns=['daid'])
#print(scores)
#print(daids)
#result_df = pd.concat((scores, daids), axis=1) # concat columns
#daid_group = result_df.groupby(['daid'])
#daid2_wordscore = daid_group['score'].sum()
print(utool.dict_str(daid2_score))
    aidkeys = np.array(list(daid2_score.keys()))
    totalscores = np.array(list(daid2_score.values()))
sortx = totalscores.argsort()[::-1]
ranked_aids = aidkeys[sortx]
ranked_scores = totalscores[sortx]
score_df = | pd.DataFrame(ranked_scores, index=ranked_aids, columns=['score']) | pandas.DataFrame |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
ser = pd.Series(idx)
ser.iloc[-2] = pd.NaT
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
regex = re.compile("^a$")
result = s.replace({regex: "z"}, regex=True)
expected = pd.Series(["z", "b", "c"])
tm.assert_series_equal(result, expected)
def test_pandas_replace_na(self):
# GH#43344
ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
regex_mapping = {
"AA": "CC",
"BB": "CC",
"EE": "CC",
"CC": "CC-REPL",
}
result = ser.replace(regex_mapping, regex=True)
exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"dtype, input_data, to_replace, expected_data",
[
("bool", [True, False], {True: False}, [False, False]),
("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
(
pd.IntervalDtype("int64"),
IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
{pd.Interval(1, 2): pd.Interval(10, 20)},
IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
),
(
pd.IntervalDtype("float64"),
IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]),
{ | pd.Interval(1.0, 2.7) | pandas.Interval |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
| pd.Timestamp('2011-01-01 00:00', tz=tz) | pandas.Timestamp |
## License: ?
## Copyright(c) <NAME>. All Rights Reserved.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.
# Run this file initially to get the initial position of the player
# Positions(2d, 3d) are stored in pickle files which need to be imported in the main code to get initial positions/angles
# Use for calibration
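# Illustrative note (the file name below is an assumption; the actual names are
# whatever this script passes to pickle.dump when it saves the calibration): the
# main code can later restore the initial position roughly like
#   with open('initial_position3d.pickle', 'rb') as f:
#       initial_position3d = pickle.load(f)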
import cmath
import math
import os
import cv2
import numpy as np
import pyrealsense2 as rs
from cubemos.skeletontracking.core_wrapper import CM_TargetComputeDevice #refer to cubmos documentation for installation
from cubemos.skeletontracking.native_wrapper import Api #refer to cubmos documentation for installation
import pandas as pd
from datetime import datetime
import pickle
joints = ['Nose','Neck','Right_shoulder','Right_elbow','Right_wrist','Left_shoulder',
'Left_elbow','Left_wrist','Right_hip','Right_knee','Right_ankle','Left_hip',
'Left_knee','Left_ankle','Right_eye','Left_eye','Right_ear','Left_ear']
prev_joint_3d_coords ={key: (0,0,0) for key in joints}
prev_joint_locations = {key: (0,0) for key in joints}
prev_joint_distances = {key: 0 for key in joints}
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
distance_data2d = []
position_data2d = []
position_data3d = []
#To save video
#out = cv2.VideoWriter('skeleton_coordinates.mp4', 0x7634706d, 15.0, (1280, 720))
##########################################################################################################################
def default_license_dir():
return os.path.join(os.environ["HOME"], ".cubemos", "skeleton_tracking", "license") #"LOCALAPPDATA" in place of "HOME" for windows 10
#return os.path.join(os.environ["LOCALAPPDATA"], "Cubemos", "SkeletonTracking", "license")
##########################################################################################################################
api = Api(default_license_dir())
sdk_path = os.environ["CUBEMOS_SKEL_SDK"]
model_path = os.path.join(sdk_path, "models", "skeleton-tracking", "fp32", "skeleton-tracking.cubemos")
api.load_model(CM_TargetComputeDevice.CM_CPU, model_path)
profile = pipeline.start(config)
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
colorizer = rs.colorizer()
##########################################################################################################################
def get_valid_coordinates(skeleton, depth, confidence_threshold):
result_coordinate = {}
result_distance = {}
for i in range (len(skeleton.joints)):
if skeleton.confidences[i] >= confidence_threshold:
if skeleton.joints[i][0] >= 0 and skeleton.joints[i][1] >= 0:
result_coordinate[joints[i]] = tuple(map(int, skeleton.joints[i]))
dist,_,_,_ = cv2.mean((depth[result_coordinate[joints[i]][1]-3:result_coordinate[joints[i]][1]+3,result_coordinate[joints[i]][0]-3:result_coordinate[joints[i]][0]+3].astype(float))*depth_scale)
result_distance[joints[i]] = dist
return result_coordinate,result_distance
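# Usage sketch (illustrative; `skeleton`, `depth_image` and the numbers are
# placeholders): each confidently-detected joint gets a pixel location plus a
# depth estimate averaged over a small window of the depth image, e.g.
#   coords, dists = get_valid_coordinates(skeleton, depth_image, 0.5)
#   coords['Nose']  -> (640, 360)   # pixel (x, y)
#   dists['Nose']   -> 1.87         # metres from the camera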
##########################################################################################################################
def convert_depth_to_phys_coord_using_realsense(intrin,x, y, depth):
result = rs.rs2_deproject_pixel_to_point(intrin, [x, y], depth)
#result[0]: right (x), result[1]: down (y), result[2]: forward (z) from camera POV
return result[0], result[1], result[2]
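# Usage sketch (illustrative; `intrinsics` stands for the colour stream's
# rs.intrinsics object and the numbers are placeholders): deprojection turns a
# pixel plus its depth in metres into camera-frame coordinates,
#   x, y, z = convert_depth_to_phys_coord_using_realsense(intrinsics, 640, 360, 1.87)
# with x to the right, y downward and z forward from the camera's point of view.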
##########################################################################################################################
def calculateAngle(x1, y1, z1,x2, y2, z2,x3, y3, z3):
ABx = x1 - x2
ABy = y1 - y2
ABz = z1 - z2
BCx = x3 - x2
BCy = y3 - y2
BCz = z3 - z2
    dotProduct = ABx * BCx + ABy * BCy + ABz * BCz
    magnitudeAB = ABx * ABx + ABy * ABy + ABz * ABz
    magnitudeBC = BCx * BCx + BCy * BCy + BCz * BCz
angle = dotProduct
if (magnitudeAB == 0 or magnitudeBC == 0):
angle = 0.0
else:
angle = cmath.acos(angle/math.sqrt(magnitudeAB *magnitudeBC))
angle = (angle * 180) / math.pi
return(round(abs(angle), 4))
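# Worked example (comment only): the angle is measured at the middle point
# (x2, y2, z2) between the segments towards the first and third points, so
#   calculateAngle(1, 0, 0,  0, 0, 0,  0, 1, 0)  ->  90.0
#   calculateAngle(1, 0, 0,  0, 0, 0,  1, 0, 0)  ->  0.0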
##########################################################################################################################
def get_initial_position():
# print("3d pos:", position_data3d)
# print("2d pos:", position_data2d)
# print("2d dis:", distance_data2d)
df = pd.DataFrame(position_data3d ,columns=joints)
df2 = pd.DataFrame(position_data2d ,columns=joints)
df3 = | pd.DataFrame(distance_data2d ,columns=joints) | pandas.DataFrame |
import pandas as pd
from typing import Optional
import umap
def target_encoding(train_df:pd.DataFrame, test_df:pd.DataFrame, target_key:str, encoding_keys:list, method='mean') -> pd.DataFrame:
"""do target encoding. encoded column name is enc_ + method + '_' + encoding_key
Arguments:
train_df {pd.DataFrame} -- [df for training]
test_df {pd.DataFrame} -- [df for test]
target_key {str} -- [object key name]
encoding_keys {list} -- [list of key names that you want to encode]
Keyword Arguments:
method {str} -- [how to encode 'mean', median', 'mode'] (default: {'mean'})
Returns:
pd.DataFrame -- [encoded test dataframe]
"""
for encoding_key in encoding_keys:
enc_key_name = 'enc_' + method + '_' + encoding_key
        if method == 'mean':
            encoding_value = train_df.groupby(encoding_key)[target_key].mean()
        elif method == 'median':
            encoding_value = train_df.groupby(encoding_key)[target_key].median()
        else:
            # unsupported methods (e.g. 'mode') currently fall back to the mean
            encoding_value = train_df.groupby(encoding_key)[target_key].mean()
test_df.loc[:, enc_key_name] = test_df.loc[:, encoding_key].map(encoding_value)
# move target_key to last
column_list = test_df.columns.to_list()
column_list.remove(target_key)
column_list.append(target_key)
return test_df[column_list]
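# Usage sketch (illustrative; the column names are made up):
#   train = pd.DataFrame({'city': ['a', 'a', 'b'], 'price': [10, 20, 40]})
#   test = pd.DataFrame({'city': ['a', 'b'], 'price': [15, 30]})
#   target_encoding(train, test, 'price', ['city'])
# adds an 'enc_mean_city' column to `test` holding the per-city mean of 'price'
# in `train` ([15.0, 40.0]) and moves 'price' to the last column.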
def dimensionality_reduction(df:pd.DataFrame, method='umap', n_neighbors=100) -> pd.DataFrame:
"""add umap features to input df
Arguments:
df {pd.DataFrame} -- [input df]
Keyword Arguments:
method {str} -- [what dim reduction method you use] (default: {'umap'})
Returns:
pd.DataFrame -- [input df + umap features]
"""
um = umap.UMAP(n_neighbors=n_neighbors, n_components=2)
um.fit(df)
tmp = um.transform(df.values)
um_df = | pd.DataFrame(tmp, columns=['dim_x', 'dim_y']) | pandas.DataFrame |
# -*- coding:utf-8 -*-
import datetime
from random import random
import numpy as np
import pandas as pd
def get_random_univariate_forecast_dataset():
X = pd.DataFrame({'ds': | pd.date_range("20130101", periods=100) | pandas.date_range |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import methfun as mf
import methdata as md
from scipy.interpolate import UnivariateSpline
# to register datetimes in matplotlib
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
mf.matplotlib_update_settings() # make prettier plots
# to avoid displaying plots: change backend
import matplotlib
# matplotlib.use('Agg')
matplotlib.use('Qt5Agg')
# os.environ['PATH'] = os.environ['PATH'] + ':/'
# length of time series
N = 2**14
tdf0 = pd.read_csv(os.path.join(md.outdir_data, 'results_tdf.csv'),index_col=0)
sdf0 = pd.read_csv(os.path.join(md.outdir_data, 'scalars_sdf.csv'),
header=[0,1], index_col = 0)
# fwat = sdf0['flux_ec', 'H2O'].values
# q005fwat = np.quantile(fwat, 0.05)
# q01fwat = np.quantile(fwat, 0.25)
# q05fwat = np.quantile(fwat, 0.5)
# q095fwat = np.quantile(fwat,0.95)
##################################### CONDITION FOR RETAINING RUNS:: ###########
cond1 = (tdf0['ustar'] > 0.2) \
& (tdf0['is_stationary'] == True) \
& (tdf0['exists'] == True) \
& (tdf0['windir'] > 230) \
& (tdf0['windir'] < 270) \
& (tdf0['length'] >= N)\
& (tdf0['h2o_is_local'] == True) \
& (sdf0['flux_ec', 'H2O'] > 0)
###################################### FOR PLOTTING ONLY:: #####################
cond_noz0 = (tdf0['ustar'] > 0.2) \
& (tdf0['is_stationary'] == True) \
& (tdf0['exists'] == True) \
& (tdf0['length'] >= N) \
& (tdf0['h2o_is_local'] == True) \
& (sdf0['flux_ec', 'H2O'] > 0)
############### OTHER CONDITIONS, JUST TO COUNT HOW MANY RUNS ARE EXCLUDED:: ###
cond_turb_intensity = (tdf0['ustar'] > 0.2)
cond_wind_sector = (tdf0['windir'] > 230) & (tdf0['windir'] < 270)
cond_stationary = (tdf0['is_stationary'] == True)
cond_water_vapor = (tdf0['h2o_is_local'] == True) & (sdf0['flux_ec', 'H2O'] > 0)
nruns_turb_int = np.size(cond_turb_intensity[cond_turb_intensity > 0])
nruns_wind_sector = np.size(cond_wind_sector[cond_wind_sector > 0])
nruns_stationary = np.size(cond_stationary[cond_stationary > 0])
nruns_water_vapor = np.size(cond_water_vapor[cond_water_vapor > 0])
# only to plot all wind rirections::
tdfnoz0 = tdf0[cond_noz0].copy()
sdfnoz0 = sdf0[cond_noz0].copy()
# for the rest of the analysis::
tdf = tdf0[cond1].copy()
sdf = sdf0[cond1].copy()
rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf.csv'), index_col=0)
# rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf_0.5.csv'), index_col=0)
# rdf = pd.read_csv( os.path.join(md.outdir_data, 'results_rdf_0.3.csv'), index_col=0)
nruns = np.shape(rdf)[0]
rdf['datetime'] = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
sdf['datetime'] = pd.to_datetime(rdf['csv_name'], format='%Y%m%d_%H%M')
tdf['datetime'] = pd.to_datetime(tdf['csv_name'], format='%Y%m%d_%H%M')
tdf0['datetime'] = | pd.to_datetime(tdf0['csv_name'], format='%Y%m%d_%H%M') | pandas.to_datetime |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData, tm.TestCase):
def test_apply(self):
with np.errstate(all='ignore'):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
class TestSeriesMap(TestData, tm.TestCase):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
self.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
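# Rough example (comment added for clarity, not part of the original source): a
# plain Python list is first wrapped in an object ndarray and then converted to a
# native dtype where possible, so maybe_convert_platform([1, 2, 3]) comes back as
# an integer ndarray while maybe_convert_platform([1, "a"]) stays object dtype.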
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
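# Rough example (comment added for clarity, not part of the original source):
#   is_nested_object(pd.Series([pd.Series([1]), 2]))  # True: object Series holding a Series
#   is_nested_object(pd.Series([1, 2]))               # False: flat numeric Series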
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
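# Illustrative usage (added sketch, not part of the original module); the
# expected outputs below assume pandas ~1.x semantics of the function above:
# >>> maybe_box_datetimelike(np.datetime64("2020-01-01"))
# Timestamp('2020-01-01 00:00:00')
# >>> maybe_box_datetimelike(np.timedelta64(1, "D"))
# Timedelta('1 days 00:00:00')
# >>> maybe_box_datetimelike(np.datetime64("2020-01-01"), dtype=object)  # left unboxed
# numpy.datetime64('2020-01-01')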
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
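# Illustrative usage (added sketch, not part of the original module): with
# dtype="infer", an all-integer float result is folded back to int64, while a
# lossy cast is refused and the input is returned unchanged.
# >>> maybe_downcast_to_dtype(np.array([1.0, 2.0]), "infer")
# array([1, 2])
# >>> maybe_downcast_to_dtype(np.array([1.5, 2.0]), "infer")
# array([1.5, 2. ])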
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
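# Illustrative usage (added sketch, not part of the original module): the
# downcast only happens when it is lossless.
# >>> maybe_downcast_numeric(np.array([1.0, 2.0]), np.dtype(np.int64))
# array([1, 2])
# >>> maybe_downcast_numeric(np.array([1.5, 2.0]), np.dtype(np.int64))
# array([1.5, 2. ])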
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
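# Illustrative usage (added sketch, not part of the original module): summing
# booleans should produce integers, so the result dtype is widened; other
# dtypes pass through unchanged.
# >>> maybe_cast_result_dtype(np.dtype(bool), "sum")
# dtype('int64')
# >>> maybe_cast_result_dtype(np.dtype(np.float64), "sum")
# dtype('float64')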
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
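# Illustrative usage (added sketch, not part of the original module; `pd`
# stands for the top-level pandas namespace): a list is wrapped into the
# requested extension array, while inputs the target array should not absorb
# (e.g. non-strings for a StringArray) are returned unchanged.
# >>> maybe_cast_to_extension_array(pd.Categorical, ["a", "b", "a"])   # -> Categorical(['a', 'b', 'a'])
# >>> maybe_cast_to_extension_array(pd.arrays.StringArray, [1, 2])     # -> [1, 2], unchanged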
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
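# Illustrative usage (added sketch, not part of the original module): an int64
# dtype cannot hold NaN, so both the dtype and the fill value are upcast; an
# integer fill does not fit a bool dtype, so we fall back to object.
# >>> maybe_promote(np.dtype(np.int64), np.nan)
# (dtype('float64'), nan)
# >>> maybe_promote(np.dtype(np.bool_), 1)
# (dtype('O'), 1)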
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if our dtype is np.complex64, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to a pandas extension type is inferred as
object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
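# Illustrative usage (added sketch, not part of the original module):
# >>> infer_dtype_from_scalar(1)
# (dtype('int64'), 1)
# >>> infer_dtype_from_scalar("foo")
# (dtype('O'), 'foo')
# >>> infer_dtype_from_scalar(Timestamp("2020-01-01", tz="UTC"), pandas_dtype=True)[0]
# datetime64[ns, UTC]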
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
if pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
if pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default False
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
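# Illustrative usage (added sketch, not part of the original module): upcasting
# an int64 array so that it can hold the NaN fill value.
# >>> maybe_upcast(np.array([1, 2, 3]), fill_value=np.nan)
# (array([1., 2., 3.]), nan)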
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
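# Illustrative usage (added sketch, not part of the original module): with only
# three categories the codes fit comfortably in int8.
# >>> coerce_indexer_dtype(np.array([0, 1, 2, 1]), categories=["a", "b", "c"])
# array([0, 1, 2, 1], dtype=int8)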
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
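# Illustrative usage (added sketch, not part of the original module): a plain
# float->int cast goes through, but NaNs are refused rather than silently
# wrapped around to a bogus integer.
# >>> astype_nansafe(np.array([1.0, 2.0]), np.dtype(np.int64))
# array([1, 2])
# >>> astype_nansafe(np.array([1.0, np.nan]), np.dtype(np.int64))
# Traceback (most recent call last):
# ...
# ValueError: Cannot convert non-finite values (NA or inf) to integer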
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
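# Illustrative usage (added sketch, not part of the original module): numeric
# strings stored as objects are coerced to floats.
# >>> soft_convert_objects(np.array(["1", "2.5"], dtype=object))
# array([1. , 2.5])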
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtype()``.
convert_floating : bool, default True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if is_integer_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_floating:
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(
input_array.dtype.name, "Float64"
)
# if we could also convert to integer, check if all floats
# are actually integers
if convert_integer:
arr = input_array[notna(input_array)]
if (arr.astype(int) == arr).all():
inferred_dtype = "Int64"
else:
inferred_dtype = inferred_float_dtype
else:
inferred_dtype = inferred_float_dtype
else:
if is_float_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_boolean:
if is_bool_dtype(input_array.dtype):
inferred_dtype = "boolean"
else:
if isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
inferred_dtype = input_array.dtype
else:
inferred_dtype = input_array.dtype
return inferred_dtype
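# Illustrative usage (added sketch, not part of the original module; `pd`
# stands for the top-level pandas namespace). Note that this returns the
# *dtype* the caller should cast to, not a converted array.
# >>> convert_dtypes(pd.Series([1, 2, 3]))
# Int64Dtype()
# >>> convert_dtypes(pd.Series(["a", "b"]))
# 'string'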
def maybe_castable(arr: np.ndarray) -> bool:
# return False to force a non-fastpath
assert isinstance(arr, np.ndarray) # GH 37024
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == "M":
return is_datetime64_ns_dtype(arr.dtype)
elif kind == "m":
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(
value: Union[ArrayLike, Scalar], convert_dates: bool = False
):
"""
we might have an array (or a single object) that is datetime-like,
and no dtype was passed; don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : bool, default False
if True, try really hard to convert dates (such as datetime.date); otherwise
leave the inferred dtype 'date' alone
"""
# TODO: why not timedelta?
if isinstance(
value, (ABCDatetimeIndex, ABCPeriodIndex, ABCDatetimeArray, ABCPeriodArray)
):
return value
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if v.ndim != 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v, require_iso8601=True, errors="raise")[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
from pandas import DatetimeIndex
try:
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
td_values = to_timedelta(v)
except ValueError:
return v.reshape(shape)
else:
return np.asarray(td_values).reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == "date" and convert_dates:
value = try_datetime(v)
elif inferred_type == "datetime":
value = try_datetime(v)
elif inferred_type == "timedelta":
value = try_timedelta(v)
elif inferred_type == "nat":
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ["mixed"]:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value
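# Illustrative usage (added sketch, not part of the original module): an object
# array of ISO date strings is recognised and coerced to datetime64[ns], while
# plain strings are left untouched.
# >>> maybe_infer_to_datetimelike(np.array(["2020-01-01", "2020-01-02"], dtype=object))
# array(['2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000'],
#       dtype='datetime64[ns]')
# >>> maybe_infer_to_datetimelike(np.array(["a", "b"], dtype=object))
# array(['a', 'b'], dtype=object)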
def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):
"""
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
is_datetime64 = is_datetime64_dtype(dtype)
# version: 0.5
# this pre-processes the raw data and stores it in csv file format
import os, sys
import pandas as pd
import numpy as np
import ta
def preProcessPATH(pathBase, fileType, countryCode=''):
fileList = []
for dirPath, dirNames, fileNames in os.walk(pathBase):
for i, f in enumerate(fileNames):
if fileType in f:
newFilePath = os.path.join(dirPath, f)
fileList.append(newFilePath)
for newF in fileList:
if '.asp' in newF:
preProcessASP(newF, countryCode)
elif '.html' in newF:
preProcessHTML(newF, countryCode)
elif '.csv' in newF:
preProcessCSV(newF)
return
def preProcessFromStock(stockDataFrame, stockName):
# RSI, KD
RSI = ta.momentum.RSIIndicator(close=stockDataFrame["Close"])
KD = ta.momentum.StochasticOscillator(high=stockDataFrame["High"],
low=stockDataFrame["Low"],
close=stockDataFrame["Close"])
K = RSI.rsi() * KD.stoch() / 100
D = RSI.rsi() * KD.stoch_signal() / 100
# KC
KC = ta.volatility.KeltnerChannel(high=stockDataFrame["High"],
low=stockDataFrame["Low"],
close=stockDataFrame["Close"])
KC_high = KC.keltner_channel_hband()
KC_middle = KC.keltner_channel_mband()
KC_low = KC.keltner_channel_lband()
# SMA
windowList = [5, 10, 20, 60, 120, 240]
resultList = []
for i in windowList:
SMA = ta.trend.SMAIndicator(close=stockDataFrame["Close"], window=i)
resultList.append(SMA.sma_indicator())
def shiftList(seq, n):
return seq[n:]
# Ichimoku Cloud
ICH = ta.trend.IchimokuIndicator(high=stockDataFrame["High"],
low=stockDataFrame["Low"],
visual=True)
ICH_a = ICH.ichimoku_a()
ICH_b = ICH.ichimoku_b()
ICH_base_line = ICH.ichimoku_base_line()
ICH_conversion_line = ICH.ichimoku_conversion_line()
# plot convert
ICH_plot_1 = ((stockDataFrame["Close"] - ICH_conversion_line) +
(stockDataFrame["Close"] - ICH_base_line))
ICH_plot_2 = (ICH_a - ICH_b)
# plot convert
displacement = 26
ich_tmp_1 = shiftList(ICH_a, displacement - 1)
ich_tmp_2 = shiftList(ICH_b, displacement - 1)
ich_tmp_1 = stockDataFrame["Close"] - ich_tmp_1
ich_tmp_2 = stockDataFrame["Close"] - ich_tmp_2
ICH_plot_3 = []
for idx in range(len(ich_tmp_1)):
newA = ich_tmp_1[idx]
newB = ich_tmp_2[idx]
# magnitude
new_ = min(abs(newA), abs(newB))
# sign
tmpSign = 0
if newA > 0 and newB > 0:
tmpSign = 1
elif newA < 0 and newB < 0:
tmpSign = -1
ICH_plot_3.append(new_ * tmpSign)
# Score - Self Index
# > 0: buy; < 0: sell; number range 50 ~ -50
Score = []
# Trend Analysis
# > 0: uptrend; = 0: flat; < 0: downtrend
# TrendAnalysis, TrendAnalysisRaw = [], []
# thresholdRatio = 1.5
# tmp1MidNumber, timeLength = 5, 10 # which means 5 % and 10 days
# Score Point
# ScorePoint = []
# midPoint1, midPoint2, midPoint3 = 20, 30, 50
counter = 0
for i in range(len(stockDataFrame['Date'])):
if i == 0:
Score.append(0)
# ScorePoint.append(0)
# TrendAnalysis.append(0)
# TrendAnalysisRaw.append(0)
continue
todayScore = 0
if D[i] < 10:
todayScore = todayScore + 10
if D[i] < K[i]:
todayScore = todayScore + 5
elif D[i] > 70:
todayScore = todayScore - 10
if D[i] > K[i]:
todayScore = todayScore - 5
tmp1, tmp1_, tmp2, tmp3, tmp4 = 0, 0, 0, 0, 0
try: # plot_3 Cloud Distance
tmp1 = 100 * (float(ICH_plot_3[i]) /
float(stockDataFrame["Close"][i]))
except:
counter = counter + 1
try: # plot_1 Rise Potential
tmp2 = (100 / 2) * (float(ICH_plot_1[i]) /
float(stockDataFrame["Close"][i]))
except:
counter = counter + 1
try: # plot_2 Cloud Protection
tmp3 = 1 - abs(
(float(ICH_plot_2[i]) / float(stockDataFrame["Close"][i])))
except:
counter = counter + 1
try: # plot_2 Cloud Protection
tmp4 = 100 * ((float(ICH_plot_2[i]) - float(ICH_plot_2[i - 1])) /
float(stockDataFrame["Close"][i]))
except:
counter = counter + 1
# TrendAnalysis
# TrendAnalysisRaw.append(tmp1)
# tmp1_ = tmp1
# if tmp1_ < tmp1MidNumber and tmp1_ > -tmp1MidNumber:
# tmp1_ = 0
# elif np.isnan(tmp1_):
# tmp1_ = 0
# else:
# timeLength_ = int(timeLength * float((100 + 2 * abs(tmp1_)) / 100))
# length = len(TrendAnalysis)
# length_ = min(timeLength_, length)
# minTrendAnalysis, maxTrendAnalysis = 100, -100
# for j in range(length_):
# tmp_ = TrendAnalysis[length - 1 - j]
# if tmp_ < minTrendAnalysis:
# minTrendAnalysis = tmp_
# if tmp_ > maxTrendAnalysis:
# maxTrendAnalysis = tmp_
# if tmp1_ < 0 and tmp1_ > minTrendAnalysis:
# tmp1_ = -1
# elif tmp1_ > 0 and tmp1_ < maxTrendAnalysis:
# tmp1_ = 1
# elif tmp1_ > tmp1MidNumber * thresholdRatio and TrendAnalysis[
# -1] == 0:
# tmp1_ = 1
# elif tmp1_ < -tmp1MidNumber * thresholdRatio and TrendAnalysis[
# -1] == 0:
# tmp1_ = -1
# TrendAnalysis.append(tmp1_)
todayScore = todayScore - tmp1 - tmp2 * tmp3 + tmp4
if ICH_plot_1[i] > ICH_plot_2[i]:
if ICH_plot_2[i - 1] > ICH_plot_1[i - 1]:
todayScore = todayScore + 10
elif ICH_plot_1[i] < ICH_plot_2[i]:
if ICH_plot_2[i - 1] < ICH_plot_1[i - 1]:
todayScore = todayScore - 10
Score.append(todayScore)
# newTmp_ = 0
# if len(Score) >= 3:
# if Score[-1] > Score[-2]:
# if Score[-1] > -midPoint2 and (Score[-2] < -midPoint3
# and Score[-3] < -midPoint3):
# newTmp_ = -30
# elif Score[-1] > -midPoint2 and (Score[-2] < -midPoint2
# and Score[-3] < -midPoint2):
# newTmp_ = -20
# elif Score[-1] > -midPoint1 and (Score[-2] + Score[-3] <
# -2 * midPoint1):
# newTmp_ = -10
# elif Score[-1] < Score[-2]:
# if Score[-1] < midPoint2 and (Score[-2] > midPoint3
# and Score[-3] > midPoint3):
# newTmp_ = 30
# elif Score[-1] < midPoint2 and (Score[-2] > midPoint2
# and Score[-3] > midPoint2):
# newTmp_ = 20
# elif Score[-1] < midPoint1 and (Score[-2] + Score[-3] >
# 2 * midPoint1):
# newTmp_ = 10
# ScorePoint.append(newTmp_)
Score_ = pd.Series(Score)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 7 15:38:47 2022
@author: jimmy
"""
# Pandas Data Series
import pandas as pd
"""1.Write a Pandas program to create and display a one-dimensional
array-like object containing an array of data using Pandas module
"""
data = [2,4,8,16]
data_frame = pd.DataFrame(data)
data_series = pd.Series(data)
print(f"{data_series}, table description")
"""2.Write a Pandas program to convert a Panda module Series to Python
list and it's type
"""
update_data = data_series.tolist()
print(f"convert pd.series to list: {update_data}", type(update_data))
"""3. Write a Pandas program to add, subtract, multiple and divide
two Pandas Series. """
dataset_1 = pd.Series([2,4,6,8,10],index =['a','b','c','d','e'])
dataset_2 = pd.Series([1,3,5,7,9],index =['a','b','c','d','e'])
sub = dataset_1.subtract(dataset_2)
print(f"Subtract: \n{sub}")
dev = dataset_1.divide(dataset_2).round(4)
print(f"Divide: \n{dev}")
mul = dataset_1.multiply(dataset_2)
print(f"Multiply: \n{mul}")
add = dataset_1.add(dataset_2)
print(f"Add: \n{add}")
"""
4.Write a Pandas program to compare the elements of the two Pandas Series
"""
dataset_3 = pd.Series([2, 4, 6, 8, 10, 0])
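"""(added sketch) The original file breaks off here; a plausible continuation
of exercise 4 compares two equal-length Series element-wise. 'dataset_4' is a
hypothetical second series introduced only for the comparison."""
dataset_4 = pd.Series([2, 4, 5, 8, 10, 1])
print(f"Equals: \n{dataset_3 == dataset_4}")
print(f"Greater than: \n{dataset_3 > dataset_4}")
print(f"Less than: \n{dataset_3 < dataset_4}")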
import os
import itertools
import collections
import pprint
import numpy as np
import pandas as pd
from scipy import stats as sps
from scipy.interpolate import interp1d
from datetime import datetime
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import ticker
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
from matplotlib.backends.backend_pgf import FigureCanvasPgf
from matplotlib.colors import ListedColormap
from scipy.interpolate import griddata
import matplotlib.colors as colors
from lib.rt import compute_daily_rts, R_T_RANGE
import lib.rt_nbinom
from lib.summary import *
TO_HOURS = 24.0
DPI = 200
NO_PLOT = False
TEST_LAG = 48.0 # hours
LINE_WIDTH = 7.0
COL_WIDTH = 3.333
FIG_SIZE_TRIPLE = (COL_WIDTH / 3, COL_WIDTH / 3 * 4/6)
FIG_SIZE_TRIPLE_TALL = (COL_WIDTH / 3, COL_WIDTH / 3 * 5/6)
FIG_SIZE_DOUBLE = (COL_WIDTH / 2, COL_WIDTH / 2 * 4/6)
FIG_SIZE_DOUBLE_TALL = (COL_WIDTH / 2, COL_WIDTH / 2 * 5/6)
CUSTOM_FIG_SIZE_FULL_PAGE_TRIPLE = (LINE_WIDTH / 3, COL_WIDTH / 2 * 5/6)
FIG_SIZE_FULL_PAGE_TRIPLE = (LINE_WIDTH / 3, LINE_WIDTH / 3 * 4/6)
FIG_SIZE_FULL_PAGE_TRIPLE_TALL = (LINE_WIDTH / 3, LINE_WIDTH / 3 * 5/6)
FIG_SIZE_FULL_PAGE_DOUBLE_ARXIV = (LINE_WIDTH / 2, LINE_WIDTH / 3 * 4/6) # 2
FIG_SIZE_FULL_PAGE_DOUBLE_ARXIV_TALL = (LINE_WIDTH / 2, LINE_WIDTH / 3 * 4.5/6) # 2 tall
FIG_SIZE_FULL_PAGE_TRIPLE_ARXIV = (LINE_WIDTH / 3.3, LINE_WIDTH / 3 * 3.5/6) # 4x3 full page
FIG_SIZE_FULL_PAGE_TRIPLE_ARXIV_SMALL = (LINE_WIDTH / 3.7, LINE_WIDTH / 3 * 2.5/6) # 6x4 full page
CUSTOM_FIG_SIZE_FULL_PAGE_QUAD = (LINE_WIDTH / 4, COL_WIDTH / 2 * 5/6)
SIGCONF_RCPARAMS_DOUBLE = {
# Fig params
"figure.autolayout": True, # Makes sure nothing the feature is neat & tight.
"figure.figsize": FIG_SIZE_DOUBLE, # Column width: 3.333 in, space between cols: 0.333 in.
"figure.dpi": 150, # Displays figures nicely in notebooks.
# Axes params
"axes.linewidth": 0.5, # Matplotlib's current default is 0.8.
"hatch.linewidth": 0.3,
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
'xtick.major.pad': 1.0,
'xtick.major.size': 1.75,
'xtick.minor.pad': 1.0,
'xtick.minor.size': 1.0,
"ytick.major.width": 0.5,
"ytick.minor.width": 0.5,
'ytick.major.pad': 1.0,
'ytick.major.size': 1.75,
'ytick.minor.pad': 1.0,
'ytick.minor.size': 1.0,
"axes.labelpad": 0.5,
# Plot params
"lines.linewidth": 0.8, # Width of lines
"lines.markeredgewidth": 0.3,
# Legend params
"legend.fontsize": 8.5, # Make the legend/label fonts a little smaller
"legend.frameon": True, # Remove the black frame around the legend
"legend.handletextpad": 0.3,
"legend.borderaxespad": 0.2,
"legend.labelspacing": 0.1,
"patch.linewidth": 0.5,
# Font params
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif", # use serif rather than sans-serif
"font.serif": "Linux Libertine O", # use "Linux Libertine" as the standard font
"font.size": 9,
"axes.titlesize": 8, # LaTeX default is 10pt font.
"axes.labelsize": 8, # LaTeX default is 10pt font.
"xtick.labelsize": 6,
"ytick.labelsize": 6,
# PDF settings
"pgf.texsystem": "xelatex", # Use Xelatex which is TTF font aware
"pgf.rcfonts": False, # Use pgf.preamble, ignore standard Matplotlib RC
"pgf.preamble": [
r'\usepackage{fontspec}',
r'\usepackage{unicode-math}',
r'\usepackage{libertine}',
r'\setmainfont{Linux Libertine O}',
r'\setmathfont{Linux Libertine O}',
]
}
SIGCONF_RCPARAMS_TRIPLE = {
# Fig params
"figure.autolayout": True, # Makes sure nothing the feature is neat & tight.
"figure.figsize": FIG_SIZE_TRIPLE, # Column width: 3.333 in, space between cols: 0.333 in.
"figure.dpi": 150, # Displays figures nicely in notebooks.
# Axes params
"axes.linewidth": 0.4, # Matplotlib's current default is 0.8.
"hatch.linewidth": 0.3,
"xtick.major.width": 0.4,
"xtick.minor.width": 0.4,
'xtick.major.pad': 1.0,
'xtick.major.size': 1.75,
'xtick.minor.pad': 1.0,
'xtick.minor.size': 1.0,
"ytick.major.width": 0.4,
"ytick.minor.width": 0.4,
'ytick.major.pad': 1.0,
'ytick.major.size': 1.75,
'ytick.minor.pad': 1.0,
'ytick.minor.size': 1.0,
"axes.labelpad": 0.5,
# Plot params
"lines.linewidth": 0.8, # Width of lines
"lines.markeredgewidth": 0.3,
# Legend
"legend.fontsize": 5.5, # Make the legend/label fonts a little smaller
"legend.frameon": True, # Remove the black frame around the legend
"legend.handletextpad": 0.5,
"legend.borderaxespad": 0.0,
"legend.labelspacing": 0.05,
"patch.linewidth": 0.3,
# Font params
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif", # use serif rather than sans-serif
"font.serif": "Linux Libertine O", # use "Linux Libertine" as the standard font
"font.size": 6,
"axes.titlesize": 5, # LaTeX default is 10pt font.
"axes.labelsize": 5, # LaTeX default is 10pt font.
"xtick.labelsize": 5,
"ytick.labelsize": 5,
# PDF settings
"pgf.texsystem": "xelatex", # Use Xelatex which is TTF font aware
"pgf.rcfonts": False, # Use pgf.preamble, ignore standard Matplotlib RC
"pgf.preamble": [
r'\usepackage{fontspec}',
r'\usepackage{unicode-math}',
r'\usepackage{libertine}',
r'\setmainfont{Linux Libertine O}',
r'\setmathfont{Linux Libertine O}',
]
}
NEURIPS_LINE_WIDTH = 5.5 # Text width: 5.5in (double figure minus spacing 0.2in).
FIG_SIZE_NEURIPS_DOUBLE = (NEURIPS_LINE_WIDTH / 2, NEURIPS_LINE_WIDTH / 2 * 4/6)
FIG_SIZE_NEURIPS_TRIPLE = (NEURIPS_LINE_WIDTH / 3, NEURIPS_LINE_WIDTH / 3 * 4/6)
FIG_SIZE_NEURIPS_DOUBLE_TALL = (NEURIPS_LINE_WIDTH / 2, NEURIPS_LINE_WIDTH / 2 * 5/6)
FIG_SIZE_NEURIPS_TRIPLE_TALL = (NEURIPS_LINE_WIDTH / 3, NEURIPS_LINE_WIDTH / 3 * 5/6)
NEURIPS_RCPARAMS = {
"figure.autolayout": False, # Makes sure nothing the feature is neat & tight.
"figure.figsize": FIG_SIZE_NEURIPS_DOUBLE,
"figure.dpi": 150, # Displays figures nicely in notebooks.
# Axes params
"axes.linewidth": 0.5, # Matplotlib's current default is 0.8.
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
"ytick.major.width": 0.5,
"ytick.minor.width": 0.5,
"hatch.linewidth": 0.3,
"xtick.major.width": 0.5,
"xtick.minor.width": 0.5,
'xtick.major.pad': 1.0,
'xtick.major.size': 1.75,
'xtick.minor.pad': 1.0,
'xtick.minor.size': 1.0,
'ytick.major.pad': 1.0,
'ytick.major.size': 1.75,
'ytick.minor.pad': 1.0,
'ytick.minor.size': 1.0,
"axes.labelpad": 0.5,
# Grid
"grid.linewidth": 0.3,
# Plot params
"lines.linewidth": 1.0,
"lines.markersize": 4,
'errorbar.capsize': 3.0,
# Font
"text.usetex": True, # use LaTeX to write all text
"font.family": "serif", # use serif rather than sans-serif
"font.serif": "Times New Roman", # use "Times New Roman" as the standard font
"font.size": 8.5,
"axes.titlesize": 8.5, # LaTeX default is 10pt font.
"axes.labelsize": 8.5, # LaTeX default is 10pt font.
"xtick.labelsize": 8,
"ytick.labelsize": 8,
# Legend
"legend.fontsize": 7, # Make the legend/label fonts a little smaller
"legend.frameon": True, # Remove the black frame around the legend
"legend.handletextpad": 0.3,
"legend.borderaxespad": 0.2,
"legend.labelspacing": 0.1,
"patch.linewidth": 0.5,
# PDF
"pgf.texsystem": "xelatex", # use Xelatex which is TTF font aware
"pgf.rcfonts": False, # Use pgf.preamble, ignore standard Matplotlib RC
"pgf.preamble": [
r'\usepackage{fontspec}',
r'\usepackage{unicode-math}',
r'\setmainfont{Times New Roman}',
],
}
def trans_data_to_axis(ax):
"""Compute the transform from data to axis coordinate system in axis `ax`"""
axis_to_data = ax.transAxes + ax.transData.inverted()
data_to_axis = axis_to_data.inverted()
return data_to_axis
def days_to_datetime(arr, start_date):
# timestamps
ts = arr * 24 * 60 * 60 + pd.Timestamp(start_date).timestamp()
return pd.to_datetime(ts, unit='s')
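# Example (added sketch, not part of the original module): simulation-time day
# offsets mapped onto calendar dates.
# >>> days_to_datetime(np.array([0.0, 1.5]), start_date='2020-03-08')
# DatetimeIndex(['2020-03-08 00:00:00', '2020-03-09 12:00:00'], dtype='datetime64[ns]', freq=None)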
def lockdown_widget(ax, lockdown_at, start_date, lockdown_label_y, lockdown_label='Lockdown',
xshift=0.0, zorder=None, ls='--', color='black', text_off=False):
"""
Draw the lockdown widget corresponding to a vertical line at the desired location along with a
label. The data can be passed either in `float` or in `datetime` format.
Parameters
----------
ax
Axis to draw on
lockdown_at
Location of vertical lockdown line
start_date
Value of the origin of the x-axis
lockdown_label_y
Location of the text label on the y-axis
lockdown_label : str (optional, default: 'Lockdown')
Text label
xshift : float (optional, default: 0.0)
Shift in a-axis of the text label
zorder : int (optional, default: None)
z-order of the widget
ls : str (optional, default: '--')
Linestyle of the vertical line
color : str (optional, default: 'black')
color of the vertical line
text_off : bool (optional, default: False)
Indicate if the text label should be turned off
"""
if isinstance(start_date, float): # If plot with float x-axis
lckdn_x = start_date + lockdown_at
ax.axvline(lckdn_x, linestyle=ls, color=color, label='_nolegend_',
zorder=zorder)
else:
# If plot with datetime x-axis
lckdn_dt = days_to_datetime(lockdown_at, start_date=start_date) # str to datetime
lckdn_x_d = lckdn_dt.toordinal() # datetime to float in data coordinates
ax.axvline(lckdn_x_d, linestyle=ls, color=color, label='_nolegend_',
zorder=zorder)
# Display the text label
if not text_off:
if xshift == 0.0:
# Automatic shift of the text in the plot (normalized) axis coordinates
lckdn_x_a, _ = trans_data_to_axis(ax).transform([lckdn_x_d, 0.0]) # data coordinates to axis coordinates
ax.text(x=lckdn_x_a, y=lockdown_label_y, s=lockdown_label,
transform=ax.transAxes, rotation=90,
verticalalignment='bottom',
horizontalalignment='right')
else:
# NOTE: for backward-compatibility, manual shift of the text, should be removed
ax.text(x=lckdn_dt + pd.Timedelta(xshift, unit='d'),
y=lockdown_label_y, s=lockdown_label, rotation=90)
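# Example (added sketch, not part of the original module): draw a lockdown line
# 14 (simulation) days after the start date on a datetime x-axis, with its text
# label placed at 80% of the axis height.
# >>> fig, ax = plt.subplots()
# >>> lockdown_widget(ax, lockdown_at=14.0, start_date='2020-03-08',
# ...                 lockdown_label_y=0.8)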
def target_widget(show_target, start_date, ax, zorder=None, ms=4.0, label='COVID-19 case data'):
txx = np.linspace(0, show_target.shape[0] - 1, num=show_target.shape[0])
txx = days_to_datetime(txx, start_date=start_date)
ax.plot(txx, show_target, ls='', marker='x', ms=ms,
color='black', label=label, zorder=zorder)
class CustomSitesProportionFixedLocator(plt.Locator):
"""
Custom locator to avoid tick font bug of matplotlib
"""
def __init__(self):
pass
def __call__(self):
return np.log(np.array([2, 5, 10, 25, 100]))
class Plotter(object):
"""
Plotting class
"""
def __init__(self):
# plot constants
# check out https://colorhunt.co/
self.color_expo = '#ffcc00'
self.color_iasy = '#00a8cc'
self.color_ipre = '#005082'
self.color_isym = '#000839'
self.color_testing = '#ffa41b'
self.color_posi = '#4daf4a'
self.color_nega = '#e41a1c'
self.color_all = '#ffa41b'
self.color_positive = '#00a8cc'
self.color_age = '#005082'
self.color_tracing = '#000839'
self.color_infected = '#000839'
self.filling_alpha = 0.2
self.color_different_scenarios = [
'#e41a1c',
'#377eb8',
'#4daf4a',
'#984ea3',
'#ff7f00',
'#ffff33',
'#a65628',
'#f781bf',
'#999999'
]
self.color_different_scenarios_alt = [
'#a1dab4',
'#41b6c4',
'#2c7fb8',
'#253494',
]
# 2D visualization
self.density_alpha = 0.7
self.marker_home = "^"
self.marker_site = "o"
self.color_home = '#000839'
self.color_site = '#000000'
self.size_home = 80
self.size_site = 300
def _set_matplotlib_params(self, format='double'):
matplotlib.backend_bases.register_backend('pdf', FigureCanvasPgf)
if format == 'double':
plt.rcParams.update(SIGCONF_RCPARAMS_DOUBLE)
elif format == 'triple':
plt.rcParams.update(SIGCONF_RCPARAMS_TRIPLE)
elif format == 'neurips-double':
plt.rcParams.update(NEURIPS_RCPARAMS)
else:
raise ValueError('Invalid figure format.')
def _set_default_axis_settings(self, ax):
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
def plot_cumulative_infected(self, sim, title='Example', filename='daily_inf_0',
figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, show_target=None,
start_date='1970-01-01',
subplot_adjust=None, legend_loc='upper right'):
'''
Plots cumulative infected split by group
averaged over random restarts, using error bars for std-dev
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, iasy_mu, iasy_sig = self.__comp_state_cumulative(sim, 'iasy', acc)
# _, ipre_mu, ipre_sig = self.__comp_state_cumulative(sim, 'ipre', acc)
_, isym_mu, isym_sig = self.__comp_state_cumulative(sim, 'isym', acc)
# _, expo_mu, iexpo_sig = self.__comp_state_cumulative(sim, 'expo', acc)
# _, posi_mu, posi_sig = self.__comp_state_cumulative(sim, 'posi', acc)
line_xaxis = np.zeros(ts.shape)
line_iasy = iasy_mu
line_isym = iasy_mu + isym_mu
error_isym = np.sqrt(iasy_sig**2 + isym_sig**2)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.plot(ts, line_iasy, c='black', linestyle='-')
ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
elinewidth=0.8, errorevery=errorevery, capsize=3.0)
# filling
ax.fill_between(ts, line_xaxis, line_iasy, alpha=self.filling_alpha, label='Asymptomatic',
edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
ax.fill_between(ts, line_iasy, line_isym, alpha=self.filling_alpha, label='Symptomatic',
edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
# limits
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
# extra
if lockdown_at is not None:
lockdown_widget(ax, lockdown_at, start_date,
lockdown_label_y,
lockdown_label)
if show_target is not None:
target_widget(show_target, start_date, ax)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
ax.legend(loc=legend_loc, borderaxespad=0.5)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_infected(self, sim, title='Example', filename='daily_inf_0',
figsize=(6, 5), errorevery=20, acc=1000, ymax=None,
lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, show_target=None,
lockdown_end=None,
start_date='1970-01-01',
subplot_adjust=None, legend_loc='upper right'):
'''
Plots daily infected split by group
averaged over random restarts, using error bars for std-dev
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, iasy_mu, iasy_sig = comp_state_over_time(sim, 'iasy', acc)
_, ipre_mu, ipre_sig = comp_state_over_time(sim, 'ipre', acc)
_, isym_mu, isym_sig = comp_state_over_time(sim, 'isym', acc)
# _, expo_mu, iexpo_sig = comp_state_over_time(sim, 'expo', acc)
# _, posi_mu, posi_sig = comp_state_over_time(sim, 'posi', acc)
line_xaxis = np.zeros(ts.shape)
line_iasy = iasy_mu
line_ipre = iasy_mu + ipre_mu
line_isym = iasy_mu + ipre_mu + isym_mu
error_isym = np.sqrt(iasy_sig**2 + ipre_sig**2 + isym_sig**2)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.plot(ts, line_iasy,
c='black', linestyle='-')
ax.plot(ts, line_ipre,
c='black', linestyle='-')
ax.errorbar(ts, line_isym, yerr=error_isym, c='black', linestyle='-',
elinewidth=0.8, errorevery=errorevery, capsize=3.0)
# filling
ax.fill_between(ts, line_xaxis, line_iasy, alpha=0.5, label='Asymptomatic',
edgecolor=self.color_iasy, facecolor=self.color_iasy, linewidth=0, zorder=0)
ax.fill_between(ts, line_iasy, line_ipre, alpha=0.5, label='Pre-symptomatic',
edgecolor=self.color_ipre, facecolor=self.color_ipre, linewidth=0, zorder=0)
ax.fill_between(ts, line_ipre, line_isym, alpha=0.5, label='Symptomatic',
edgecolor=self.color_isym, facecolor=self.color_isym, linewidth=0, zorder=0)
# limits
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
# extra
if lockdown_at is not None:
lockdown_widget(ax, lockdown_at, start_date,
lockdown_label_y,
lockdown_label)
if lockdown_end is not None:
lockdown_widget(ax=ax, lockdown_at=lockdown_end, start_date=start_date,
lockdown_label_y=lockdown_label_y,
lockdown_label='End of lockdown', ls='dotted')
if show_target is not None:
target_widget(show_target, start_date, ax)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
#set ticks every week
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
# legend
ax.legend(loc=legend_loc, borderaxespad=0.5)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_daily_tested(self, sim, title='Example', filename='daily_tested_0', figsize=(10, 10), errorevery=20,
acc=1000, ymax=None):
'''
Plots daily tested, positive daily tested, negative daily tested
averaged over random restarts, using error bars for std-dev
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# automatically shifted by `test_lag` in the function
ts, posi_mu, posi_sig = comp_state_over_time(sim, 'posi', acc)
_, nega_mu, nega_sig = comp_state_over_time(sim, 'nega', acc)
line_xaxis = np.zeros(ts.shape)
line_posi = posi_mu
line_nega = posi_mu + nega_mu
error_posi = posi_sig
error_nega = nega_sig + posi_sig
T = posi_mu.shape[0]
# lines
ax.errorbar(ts, line_posi, yerr=posi_sig, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='dotted')
ax.errorbar(ts, line_nega, yerr=nega_sig, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
# filling
ax.fill_between(ts, line_xaxis, line_posi, alpha=0.5, label=r'Positive tests',
edgecolor=self.color_posi, facecolor=self.color_posi, linewidth=0, zorder=0)
ax.fill_between(ts, line_posi, line_nega, alpha=0.5, label=r'Negative tests',
edgecolor=self.color_nega, facecolor=self.color_nega, linewidth=0, zorder=0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(posi_mu + nega_mu)
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'Tests')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
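    # Example usage (sketch; `plotter` and `summary` are illustrative names for an
    # instance of this plotting class and a loaded simulation summary object):
    #     plotter.plot_daily_tested(summary, title='Tests over time',
    #                               filename='daily_tested_area1', errorevery=10)
    # The stacked areas show positive tests at the bottom with negative tests above,
    # and the figure is written to plots/daily_tested_area1.png at DPI resolution.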
def plot_daily_at_home(self, sim, title='Example', filename='daily_at_home_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None):
        '''
        Plots the daily number of people at home under each social distancing measure,
        together with the total number infected, averaged over random restarts,
        using error bars for std-dev
        '''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ts, all_mu, all_sig = comp_contained_over_time(sim, 'SocialDistancingForAllMeasure', acc)
_, positive_mu, positive_sig = comp_contained_over_time(sim, 'SocialDistancingForPositiveMeasure', acc)
_, age_mu, age_sig = comp_contained_over_time(sim, 'SocialDistancingByAgeMeasure', acc)
_, tracing_mu, tracing_sig = comp_contained_over_time(sim, 'SocialDistancingForSmartTracing', acc)
_, iasy_mu, iasy_sig = comp_state_over_time(sim, 'iasy', acc)
_, ipre_mu, ipre_sig = comp_state_over_time(sim, 'ipre', acc)
_, isym_mu, isym_sig = comp_state_over_time(sim, 'isym', acc)
line_xaxis = np.zeros(ts.shape)
line_all = all_mu
line_positive = positive_mu
line_age = age_mu
line_tracing = tracing_mu
line_infected = iasy_mu + ipre_mu + isym_mu
error_all = all_sig
error_positive = positive_sig
error_age = age_sig
error_tracing = tracing_sig
error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
# lines
ax.errorbar(ts, line_infected, label=r'Total infected', errorevery=errorevery, c=self.color_infected, linestyle='--', yerr=error_infected)
ax.errorbar(ts, line_all, yerr=error_all, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_positive, yerr=error_positive, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_age, yerr=error_age, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
ax.errorbar(ts, line_tracing, yerr=error_tracing, elinewidth=0.8, errorevery=errorevery,
c='black', linestyle='-')
# filling
ax.fill_between(ts, line_xaxis, line_all, alpha=self.filling_alpha, label=r'SD for all',
edgecolor=self.color_all, facecolor=self.color_all, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_positive, alpha=self.filling_alpha, label=r'SD for positively tested',
edgecolor=self.color_positive, facecolor=self.color_positive, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_age, alpha=self.filling_alpha, label=r'SD for age group',
edgecolor=self.color_age, facecolor=self.color_age, linewidth=0, zorder=0)
ax.fill_between(ts, line_xaxis, line_tracing, alpha=self.filling_alpha, label=r'SD for traced contacts',
edgecolor=self.color_tracing, facecolor=self.color_tracing, linewidth=0, zorder=0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max([all_mu, positive_mu, age_mu, tracing_mu])
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'[people]')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
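    # Example usage (sketch): the measure names queried above (e.g.
    # 'SocialDistancingForAllMeasure') must match measures configured in the simulation,
    # otherwise comp_contained_over_time has nothing to count. A hypothetical call:
    #     plotter.plot_daily_at_home(summary, title='People contained',
    #                                filename='daily_at_home_area1')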
def compare_total_infections(self, sims, titles, figtitle='Title', figformat='double',
filename='compare_inf_0', figsize=None, errorevery=20, acc=500, ymax=None, x_axis_dates=True,
lockdown_label='Lockdown', lockdown_at=None, lockdown_label_y=None, lockdown_xshift=0.0,
conditional_measures=None,
show_positives=False, show_legend=True, legend_is_left=False,
subplot_adjust=None, start_date='1970-01-01', xtick_interval=2, first_one_dashed=False,
show_single_runs=False, which_single_runs=None):
        '''
Plots total infections for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev
'''
assert isinstance(sims[0], str), '`sims` must be list of filepaths'
# Set double figure format
self._set_matplotlib_params(format=figformat)
# Draw figure
fig, ax = plt.subplots(1, 1, figsize=figsize)
for i, sim in enumerate(sims):
is_conditional = True if i == conditional_measures else False
try:
data = load_condensed_summary(sim, acc)
except FileNotFoundError:
acc = create_condensed_summary_from_path(sim, acc=acc)
data = load_condensed_summary(sim, acc)
ts = data['ts']
lockdown_at = data['lockdowns'] if is_conditional else lockdown_at
if x_axis_dates:
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
if not show_single_runs:
iasy_mu = data['iasy_mu']
iasy_sig = data['iasy_sig']
ipre_mu = data['ipre_mu']
ipre_sig = data['ipre_sig']
isym_mu = data['isym_mu']
isym_sig = data['isym_sig']
line_infected = iasy_mu + ipre_mu + isym_mu
error_infected = np.sqrt(np.square(iasy_sig) + np.square(ipre_sig) + np.square(isym_sig))
# lines
ax.plot(ts, line_infected, linestyle='-', label=titles[i], c=self.color_different_scenarios[i])
ax.fill_between(ts, np.maximum(line_infected - 2 * error_infected, 0), line_infected + 2 * error_infected,
color=self.color_different_scenarios[i], alpha=self.filling_alpha, linewidth=0.0)
else:
iasy = data['iasy']
ipre = data['ipre']
isym = data['isym']
lines_infected = iasy + ipre + isym
# lines
runs = [which_single_runs] if which_single_runs else range(min(show_single_runs, sim.random_repeats))
for k, r in enumerate(runs):
ax.plot(ts, lines_infected[:, r], linestyle='-', label=titles[i] if k == 0 else None,
c=self.color_different_scenarios[i])
# For conditional measures only
if lockdown_at:
for lockdown in lockdown_at[r]:
start_lockdown = lockdown[0] / TO_HOURS
end_lockdown = lockdown[1] / TO_HOURS
lockdown_widget(ax, start_lockdown, 0.0,
lockdown_label_y,
None)
lockdown_widget(ax, end_lockdown, 0.0,
lockdown_label_y,
None, ls='-')
# axis
ax.set_xlim(left=np.min(ts))
if ymax is None:
ymax = 1.5 * np.max(iasy_mu + ipre_mu + isym_mu)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
if x_axis_dates:
# set xticks every week
ax.xaxis.set_minor_locator(mdates.WeekdayLocator(byweekday=1, interval=1))
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=1, interval=xtick_interval))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
else:
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel('Infected')
if not isinstance(lockdown_at, list):
if lockdown_at is not None:
lockdown_widget(ax, lockdown_at, start_date,
lockdown_label_y,
lockdown_label,
xshift=lockdown_xshift)
# Set default axes style
self._set_default_axis_settings(ax=ax)
if show_legend:
# legend
if legend_is_left:
leg = ax.legend(loc='upper left',
bbox_to_anchor=(0.001, 0.999),
bbox_transform=ax.transAxes,
# prop={'size': 5.6}
)
else:
leg = ax.legend(loc='upper right',
bbox_to_anchor=(0.999, 0.999),
bbox_transform=ax.transAxes,
# prop={'size': 5.6}
)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.pdf', format='pdf', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
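    # Example usage (sketch; file names are hypothetical): `sims` must be paths to
    # condensed summary files, one per scenario, aligned with `titles`:
    #     plotter.compare_total_infections(
    #         ['summaries/no-measures.pk', 'summaries/contact-tracing.pk'],
    #         titles=['No measures', 'Contact tracing'],
    #         start_date='2020-03-08', filename='compare_inf_area1')
    # Passing e.g. show_single_runs=3 switches from the mean +/- 2 std-dev band to
    # plotting the first three individual random repeats per scenario.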
def compare_quantity(self, sims, titles, quantity='infected', mode='total', ymax=None,
normalization_baseline_path=None,
start_date='1970-01-01', xtick_interval=3, x_axis_dates=False,
figformat='double', filename='compare_epidemics', figsize=None,
lockdown_label='Lockdown', lockdown_at=None, lockdown_label_y=None, lockdown_xshift=0.0,
show_legend=True, legend_is_left=False, subplot_adjust=None):
        '''
Plots `quantity` in `mode` for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev
'''
assert isinstance(sims[0], str), '`sims` must be list of filepaths'
assert mode in ['total', 'daily', 'cumulative']
assert quantity in ['infected', 'hosp', 'dead']
labeldict = {'total': {'infected': 'Infected',
'hosp': 'Hospitalized',
'dead': 'Fatalities'},
'cumulative': {'infected': 'Cumulative Infections',
'hosp': 'Cumulative Hospitalizations',
'dead': 'Cumulative Fatalities'},
'daily': {'infected': 'Daily Infections',
'hosp': 'Daily Hospitalizations',
'dead': 'Daily Fatalities'},
}
# Set double figure format
self._set_matplotlib_params(format=figformat)
# Draw figure
fig, ax = plt.subplots(1, 1, figsize=figsize)
# Load baseline data
# if normalization_baseline_path:
# baseline_data = load_condensed_summary_compat(normalization_baseline_path)
# baseline_cases, _ = get_plot_data(baseline_data, quantity=quantity, mode=mode)
for i, sim in enumerate(sims):
data = load_condensed_summary_compat(sim)
ts = data['ts'] if not x_axis_dates else days_to_datetime(data['ts'], start_date=start_date)
line_cases, error_cases = get_plot_data(data, quantity=quantity, mode=mode)
ylabel = labeldict[mode][quantity]
# if normalization_baseline_path:
# line_cases = 1 - line_cases / baseline_cases
# error_cases = error_cases / baseline_cases
# line_cases = np.nan_to_num(line_cases, nan=0.0)
# error_cases = np.nan_to_num(error_cases, nan=0.0)
# ylabel = f'Reduction in ' + ylabel
# lines
ax.plot(ts, line_cases, linestyle='-', label=titles[i], c=self.color_different_scenarios[i])
ax.fill_between(ts, np.maximum(line_cases - 2 * error_cases, 0), line_cases + 2 * error_cases,
color=self.color_different_scenarios[i], alpha=self.filling_alpha, linewidth=0.0)
# axis
ax.set_xlim(left=np.min(ts))
if ymax is None:
ymax = 1.5 * np.max(line_cases)
ax.set_ylim((0, ymax))
if x_axis_dates:
# set xticks every week
ax.xaxis.set_minor_locator(mdates.WeekdayLocator(byweekday=1, interval=1))
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=1, interval=xtick_interval))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
else:
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(ylabel)
if lockdown_at is not None:
lockdown_widget(ax, lockdown_at, start_date,
lockdown_label_y,
lockdown_label,
xshift=lockdown_xshift)
# Set default axes style
self._set_default_axis_settings(ax=ax)
if show_legend:
# legend
if legend_is_left:
leg = ax.legend(loc='upper left',
bbox_to_anchor=(0.001, 0.999),
bbox_transform=ax.transAxes,
# prop={'size': 5.6}
)
else:
leg = ax.legend(loc='upper right',
bbox_to_anchor=(0.999, 0.999),
bbox_transform=ax.transAxes,
# prop={'size': 5.6}
)
subplot_adjust = subplot_adjust or {'bottom':0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.pdf', format='pdf', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
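    # Example usage (sketch): `quantity` and `mode` must come from the asserted sets
    # above, e.g. cumulative fatalities for two hypothetical scenarios:
    #     plotter.compare_quantity(['summaries/baseline.pk', 'summaries/tracing.pk'],
    #                              titles=['Baseline', 'Tracing'],
    #                              quantity='dead', mode='cumulative')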
def compare_total_fatalities_and_hospitalizations(self, sims, titles, mode='show_both',
figtitle=r'Hospitalizations and Fatalities',
lockdown_label='Lockdown', lockdown_at=None, lockdown_label_y=None,
figformat='neurips-double',
xtick_interval=2, lockdown_xshift=0.0,
filename='compare_inf_0', figsize=(10, 10), errorevery=20, acc=1000, ymax=None,
show_legend=True, legendYoffset=0.0, legend_is_left=False, legendXoffset=0.0,
subplot_adjust=None, start_date='1970-01-01', first_one_dashed=False):
        '''
Plots total fatalities and hospitalizations for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev
'''
# Set double figure format
self._set_matplotlib_params(format=figformat)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# hospitalizations
for i, sim in enumerate(sims):
if isinstance(sim, str):
try:
data = load_condensed_summary(sim, acc=acc)
except FileNotFoundError:
acc = create_condensed_summary_from_path(sim, acc=acc)
data = load_condensed_summary(sim, acc=acc)
acc = data['acc']
ts = data['ts']
hosp_mu = data['hosp_mu']
hosp_sig = data['hosp_sig']
dead_mu = data['dead_mu']
dead_sig = data['dead_sig']
loaded_extracted_data = True
else:
loaded_extracted_data = False
if not loaded_extracted_data:
if acc > sim.max_time:
acc = int(sim.max_time)
ts, hosp_mu, hosp_sig = comp_state_over_time(sim, 'hosp', acc)
ts, dead_mu, dead_sig = comp_state_over_time(sim, 'dead', acc)
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
# ax.errorbar(ts, hosp_mu, yerr=2*hosp_sig, label=titles[i], errorevery=errorevery,
# c=self.color_different_scenarios[i], linestyle='-', elinewidth=0.8, capsize=3.0)
# ax.errorbar(ts, dead_mu, yerr=2*dead_sig, errorevery=errorevery,
# c=self.color_different_scenarios[i], linestyle='dotted', elinewidth=0.8, capsize=3.0)
if mode == 'show_both' or mode == 'show_hosp_only':
ax.plot(ts, hosp_mu, linestyle='-',
label=titles[i], c=self.color_different_scenarios[i])
ax.fill_between(ts, hosp_mu - 2 * hosp_sig, hosp_mu + 2 * hosp_sig,
color=self.color_different_scenarios[i], alpha=self.filling_alpha, linewidth=0.0)
if mode == 'show_both' or mode == 'show_dead_only':
linestyle = '-' if mode == 'show_dead_only' else 'dotted'
labels = titles[i] if mode == 'show_dead_only' else None
ax.plot(ts, dead_mu, linestyle=linestyle,
label=labels, c=self.color_different_scenarios[i])
ax.fill_between(ts, dead_mu - 2 * dead_sig, dead_mu + 2 * dead_sig,
color=self.color_different_scenarios[i], alpha=self.filling_alpha, linewidth=0.0)
# axis
ax.set_xlim(left=np.min(ts))
if ymax is None:
ymax = 1.5 * np.max(hosp_mu + hosp_sig)
ax.set_ylim((0, ymax))
# ax.set_xlabel('Days')
ax.set_ylabel('People')
if not isinstance(lockdown_at, list):
if lockdown_at is not None:
lockdown_widget(ax, lockdown_at, start_date,
lockdown_label_y,
lockdown_label,
xshift=lockdown_xshift)
# ax.xaxis.set_minor_locator(mdates.WeekdayLocator(byweekday=1, interval=1))
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=1, interval=xtick_interval))
#set major ticks format
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
fig.autofmt_xdate(bottom=0.2, rotation=0, ha='center')
self._set_default_axis_settings(ax=ax)
# legend
if show_legend:
# legend
if legend_is_left:
leg = ax.legend(loc='upper left', borderaxespad=0.5)
else:
leg = ax.legend(loc='upper right', borderaxespad=0.5)
if legendYoffset != 0.0:
# Get the bounding box of the original legend
bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes)
# Change to location of the legend.
bb.y0 += legendYoffset
bb.y1 += legendYoffset
leg.set_bbox_to_anchor(bb, transform=ax.transAxes)
if legendXoffset != 0.0:
# Get the bounding box of the original legend
bb = leg.get_bbox_to_anchor().inverse_transformed(ax.transAxes)
# Change to location of the legend.
bb.x0 += legendXoffset
bb.x1 += legendXoffset
leg.set_bbox_to_anchor(bb, transform=ax.transAxes)
subplot_adjust = subplot_adjust or {
'bottom': 0.14, 'top': 0.98, 'left': 0.12, 'right': 0.96}
plt.subplots_adjust(**subplot_adjust)
plt.savefig('plots/' + filename + '.pdf', format='pdf', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
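    # Example usage (sketch): with the default mode='show_both', hospitalizations are
    # drawn solid and fatalities dotted in the same scenario colour; 'show_hosp_only'
    # or 'show_dead_only' drops the other curve. Hypothetical call:
    #     plotter.compare_total_fatalities_and_hospitalizations(
    #         ['summaries/baseline.pk', 'summaries/tracing.pk'],
    #         titles=['Baseline', 'Tracing'], mode='show_dead_only')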
def plot_2d_infections_at_time(self, sim, at_time, density_bandwidth=1.0, restart=0,
title='Example', filename='2d_inf_0', figsize=(10, 10), acc=1000, ymax=None):
'''
Plots 2d visualization using mobility object. The bandwidth set by `density_bandwidth`
determines the bandwidth of the RBF kernel in KDE used to generate the plot.
Smaller means more affected by local changes. Set the colors and markers in the __init__ function
'''
if acc > sim.max_time:
acc = int(sim.max_time)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# infections
r = restart
is_expo = is_state_at(sim, r, 'expo', at_time)
is_iasy = is_state_at(sim, r, 'iasy', at_time)
is_ipre = is_state_at(sim, r, 'ipre', at_time)
is_isym = is_state_at(sim, r, 'isym', at_time)
is_infected = is_iasy | is_ipre | is_isym
no_state = (1 - is_infected) & (1 - is_expo)
idx_expo = np.where(is_expo)[0]
idx_infected = np.where(is_infected)[0]
idx_none = np.where(no_state)[0]
# self.color_isym = 'red'
# self.color_expo= 'yellow'
### sites
site_loc = sim.site_loc
ax.scatter(site_loc[:, 0], site_loc[:, 1], alpha=self.filling_alpha, label='public sites',
marker=self.marker_site, color=self.color_site, facecolors=self.color_site, s=self.size_site)
### home locations and their states
home_loc = sim.home_loc
# no state
ax.scatter(home_loc[idx_none, 0], home_loc[idx_none, 1],
marker=self.marker_home, color=self.color_home,
facecolors='none', s=self.size_home)
try:
# expo
ax.scatter(home_loc[idx_expo, 0], home_loc[idx_expo, 1],
marker=self.marker_home, color=self.color_home,
facecolors=self.color_expo, s=self.size_home, label='exposed households')
sns.kdeplot(home_loc[idx_expo, 0], home_loc[idx_expo, 1], shade=True, alpha=self.density_alpha,
shade_lowest=False, cbar=False, ax=ax, color=self.color_expo, bw=density_bandwidth, zorder=0)
# infected
ax.scatter(home_loc[idx_infected, 0], home_loc[idx_infected, 1],
marker=self.marker_home, color=self.color_home,
facecolors=self.color_isym, s=self.size_home, label='infected households')
sns.kdeplot(home_loc[idx_infected, 0], home_loc[idx_infected, 1], shade=True, alpha=self.density_alpha,
shade_lowest=False, cbar=False, ax=ax, color=self.color_isym, bw=density_bandwidth, zorder=0)
except:
print('KDE failed, likely no exposed and infected at this time. Try different timing.')
plt.close()
return
# axis
ax.set_xlim((-0.1, 1.1))
ax.set_ylim((-0.1, 1.1))
plt.axis('off')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.85)
ax.set_title(title, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
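    # Example usage (sketch): `at_time` is given in simulation time units (hours
    # elsewhere in this code, cf. TO_HOURS); a smaller `density_bandwidth` makes the
    # KDE overlay react more strongly to local clusters of households:
    #     plotter.plot_2d_infections_at_time(summary, at_time=30 * TO_HOURS,
    #                                        density_bandwidth=0.75, restart=0)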
def compare_hospitalizations_over_time(self, sims, titles, figtitle='Hospitalizations', filename='compare_hosp_0',
capacity_line_at=20, figsize=(10, 10), errorevery=20, acc=500, ymax=None):
        '''
Plots total hospitalizations for each simulation, named as provided by `titles`
to compare different measures/interventions taken. Colors taken as defined in __init__, and
averaged over random restarts, using error bars for std-dev.
The value of `capacity_line_at` defines the y-intercept of the hospitalization capacity line
'''
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for i in range(len(sims)):
if acc > sims[i].max_time:
acc = int(sims[i].max_time)
ts, line_hosp, error_sig = comp_state_over_time(
sims[i], 'hosp', acc)
line_xaxis = np.zeros(ts.shape)
# lines
ax.errorbar(ts, line_hosp, yerr=error_sig, errorevery=errorevery,
c='black', linestyle='-', elinewidth=0.8)
# filling
ax.fill_between(ts, line_xaxis, line_hosp, alpha=self.filling_alpha, zorder=0,
label=r'Hospitalized under: ' + titles[i], edgecolor=self.color_different_scenarios[i],
facecolor=self.color_different_scenarios[i], linewidth=0)
# capacity line
ax.plot(ts, capacity_line_at * np.ones(ts.shape[0]), label=r'Max. hospitalization capacity',
c='red', linestyle='--', linewidth=4.0)
# axis
ax.set_xlim((0, np.max(ts)))
if ymax is None:
ymax = 1.5 * np.max(line_hosp + error_sig)
ax.set_ylim((0, ymax))
ax.set_xlabel(r'$t$ [days]')
ax.set_ylabel(r'[people]')
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# legend
fig.legend(loc='center right', borderaxespad=0.1)
# Adjust the scaling factor to fit your legend text completely outside the plot
plt.subplots_adjust(right=0.70)
ax.set_title(figtitle, pad=20)
plt.draw()
plt.savefig('plots/' + filename + '.png', format='png', facecolor=None,
dpi=DPI, bbox_inches='tight')
if NO_PLOT:
plt.close()
return
def plot_positives_vs_target(self, sims, titles, targets, title='Example',
filename='inference_0', figsize=None, figformat='triple', errorevery=1, acc=17, ymax=None,
start_date='1970-01-01', lockdown_label='Lockdown', lockdown_at=None,
lockdown_label_y=None, subplot_adjust=None, n_age_groups=None, small_figure=False, show_legend=True):
        '''
Plots daily tested averaged over random restarts, using error bars for std-dev
together with targets from inference
'''
# Set triple figure format
self._set_matplotlib_params(format=figformat)
fig, ax = plt.subplots(figsize=figsize)
for i, sim in enumerate(sims):
if isinstance(sim, str):
try:
data = load_condensed_summary(sim, acc)
except FileNotFoundError:
acc = create_condensed_summary_from_path(sim, acc=acc, n_age_groups=n_age_groups)
data = load_condensed_summary(sim, acc=acc)
acc = data['acc']
ts = data['ts']
posi_mu = data['posi_mu']
posi_sig = data['posi_sig']
else:
if acc > sim.max_time:
acc = int(sim.max_time)
ts, posi_mu, posi_sig = comp_state_over_time(sim, 'posi', acc)
plain_ts = ts
# Convert x-axis into posix timestamps and use pandas to plot as dates
ts = days_to_datetime(ts, start_date=start_date)
# lines
ax.plot(ts, posi_mu, label=titles[i], c=self.color_different_scenarios[i])
ax.fill_between(ts, posi_mu - 2 * posi_sig, posi_mu + 2 * posi_sig,
color=self.color_different_scenarios[i],
alpha=self.filling_alpha, linewidth=0.0)
# target
if small_figure:
target_widget(targets, start_date, ax, label='Real cases', ms=1.0)
else:
target_widget(targets, start_date, ax, label='Real cases')
if ymax is None:
ymax = 1.5 * np.max(posi_mu)
ax.set_ylim((0, ymax))
ax.set_ylabel(r'Positive cases')
# lockdown
if lockdown_at is not None:
if small_figure:
                xshift = 3.5 * pd.to_timedelta(pd.to_datetime(ts[-1])
"""Track and analyze grocery spending at an item level.
This app allows a user to input a grocery item.
"""
# app.py
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import plotly.express as px
import pandas as pd
import dash_table
from dash_table import FormatTemplate
from datetime import date
df = pd.read_csv('data/items.csv') # read items.csv file into df
df_category = pd.read_csv('data/category.csv') # read category.csv file into df
pd.options.display.float_format = '{:.2f}'.format # set pandas format to 2 decimals
df['total'] = df['price'] * df['quantity'] # add 'total' column to df
df['month_year'] = pd.to_datetime(df['date']).dt.strftime('%B %Y') # add 'month_year' column to df and convert to 'month year' str format
df = df.sort_values(by='date').reset_index(drop=True) # sort df by date and reset and drop index
df_table = df[['name', 'price', 'quantity', 'date']] # create df to display table in layout
df_date = df.sort_values(by='date', ascending=False) # sort df by date in descending order and set to variable
df_date = df_date.head(1) # select top row of df
df_date = df_date.month_year.item() # select value from 'month_year' column to use as default in date dropdown
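# Example (illustrative data, not shipped with this app): data/items.csv is expected to
# provide at least name, category, price, quantity and date columns, e.g.
#     name,category,price,quantity,date
#     Apples,Produce,3.49,2,2021-05-01
# for which 'total' becomes 6.98 and 'month_year' becomes 'May 2021'; the most recent
# month present in the data is used as the default selection for the date dropdown.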
# Ref: https://dash.plotly.com/layout
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.config.suppress_callback_exceptions = True
# Ref: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/form/
def InputItem():
"""Create a form with inputs to add a new item.
Args:
none
Returns:
item name: input text field to enter item name
category: dropdown to select category
price: input numeric field to enter price
quantity: slider to select quantity
date: date picker to select date of purchase
output: text to show exceptions
"""
input_addItem = dbc.Form(
[
dbc.FormGroup(
[
dbc.Label('Item Name'),
dcc.Input(
id='name',
placeholder='Enter grocery item',
style={'width': '100%'}
),
]
),
dbc.FormGroup(
[
html.Label('Category'),
dcc.Dropdown(
id='category',
options=[
{'label': i, 'value': i} for i in sorted(df_category.Category)
],
)
]
),
dbc.FormGroup(
[
html.Label('Price'),
dcc.Input(
id='price',
type='number',
placeholder='Enter price of item',
style={'width': '100%'}
)
]
),
dbc.FormGroup(
[
html.Label('Quantity'),
dcc.Slider(
id='quantity',
min=0,
max=10,
step=1,
marks={
i: '{}'.format(i)
if i == 1
else str(i)
for i in range(1,11)
},
value=1,
),
html.Br(),
html.Label('Date of Purchase'),
html.Br(),
dcc.DatePickerSingle(
id='date',
month_format='MMM Do, YY',
date=date.today()
)
]
),
html.Div(id='output-add-item',
style={'color': 'red'})
]
)
return input_addItem
button_item = html.Div([
dbc.Row(
[
dbc.Col(
dbc.Button(
"New Item", id='button-new-item', color="primary", className="ml-2", n_clicks=0, block=True
),
width="auto",
),
dbc.Modal(
[
dbc.ModalHeader('Add New Item'),
dbc.ModalBody(InputItem()),
dbc.ModalFooter(
[
dbc.Button('Submit', id='submit-new-item', className='ml-auto', n_clicks=0, color='primary'),
dbc.Button('Cancel', id='cancel', className='ml-auto', n_clicks=0, color='primary')
]
)
],
id='modal',
is_open=False,
)
],
no_gutters=True,
className="ml-auto flex-nowrap mt-3 mt-md-0",
align="center"
),
])
# Ref: https://dash.plotly.com/dash-core-components/graph
dashboard = html.Div(
[
html.Br(),
html.Br(),
html.H5('Spending Dashboard', style={'textAlign': 'left'}),
html.Hr(),
html.Div([
dash_table.DataTable(
id='table-item',
data=df.to_dict('records'),
columns=[
{
'name': i, 'id': i
}
for i in (df.columns)
],
)
],
style={'display': 'none'},
),
html.P('Select a date:'),
html.Div([
dcc.Dropdown(
id='dash-monthyear',
options=[
{'label': i, 'value': i} for i in df.month_year.unique()
],
value=df_date,
clearable=False
)],
style={
'width': '20%',
'display': 'inline-block'
},
),
dbc.Row(
dcc.Graph(id='graph-spending-all')
),
html.Hr(),
html.P('Select a category:'),
html.Div([
dcc.Dropdown(id='dash-category',
clearable=False
)
],
style={
'width': '20%',
'display': 'inline-block'
},
),
html.Br(),
dbc.Row([
dbc.Col(
dcc.Graph(id='graph-item')
),
dbc.Col([
dash_table.DataTable(
id='table-item-display',
data=df.to_dict('records'),
columns=[
{'name': 'Name', 'id': 'name'},
{'name': 'Price', 'id': 'price', 'type': 'numeric', 'format': FormatTemplate.money(2)},
{'name': 'Quantity', 'id': 'quantity'},
{'name': 'Date', 'id': 'date'},
],
page_action='native',
page_current=0,
page_size=10,
sort_action='native',
sort_mode='single',
sort_by=[{'column_id': 'date', 'direction': 'desc'}],
style_cell={'textAlign': 'left', 'font-family': 'sans-serif'},
selected_columns=[],
selected_rows=[],
style_as_list_view=True,
)
])
]),
dbc.Row(
dcc.Graph(id='graph-trend')
),
],
style={
'margin-left': '15%',
'margin-right': '5%',
'padding': '20px 10px'
}
)
# Ref: https://dash-bootstrap-components.opensource.faculty.ai/docs/components/navbar/#
navbar = dbc.Navbar(
[
html.A(
dbc.Row(
[
dbc.Col(dbc.NavbarBrand("Grocery Spending Tracker", className="ml-2")),
],
align="left",
no_gutters=True,
),
),
],
color="dark",
dark=True,
fixed='top'
)
# Ref: https://dash-bootstrap-components.opensource.faculty.ai/examples/simple-sidebar/
sidebar = html.Div(
[
html.Br(),
html.Br(),
button_item,
],
style={
'position': 'fixed',
'top': 0,
'left': 0,
'bottom': 0,
'width': '10%',
'padding': '20px 10px',
'background-color': '#f8f9fa'
}
)
@app.callback(
Output('modal', 'is_open'),
Output('output-add-item', 'children'),
[Input('button-new-item', 'n_clicks'),
Input('cancel', 'n_clicks'),
Input('submit-new-item', 'n_clicks')],
[State('modal', 'is_open'),
State('name', 'value'),
State('category', 'value'),
State('price', 'value')
]
)
def toggle_modal(n1, n2, n3, is_open, name, category, price):
"""Callback to toggle modal.
Args:
n1: number of times new item button is clicked
n2: number of times cancel button is clicked
n3: number of times submit button is clicked
is_open: passes open state of modal
name: passes state of item name value
category: passes state of category value
price: passes state of price value
Returns:
        enables the modal to be toggled between open and closed when the buttons are clicked;
        if the submit button is clicked while name, category or price is empty (quantity & date
        have default values), the modal stays open and a message names the missing input
"""
ctx = dash.callback_context
input_id = ctx.triggered[0]['prop_id'].split('.')[0]
if input_id == 'button-new-item':
return not is_open, None
elif input_id == 'cancel':
return not is_open, None
elif input_id == 'submit-new-item':
if name == None:
return dash.no_update, 'Please enter an item name.'
if category == None:
return dash.no_update, 'Please select a category.'
if price == None:
return dash.no_update, 'Please enter a price.'
return not is_open, None
return is_open, None
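# Note (sketch): dash.callback_context.triggered reports which Input fired the callback,
# e.g. clicking the "New Item" button yields
#     [{'prop_id': 'button-new-item.n_clicks', 'value': 1}]
# so splitting prop_id on '.' recovers the component id used in the branches above.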
@app.callback(
[Output('name', 'value'),
Output('category', 'value'),
Output('price', 'value'),
Output('quantity', 'value'),
Output('date', 'date')],
[Input('modal', 'is_open')]
)
def clear_input(is_open):
"""Callback to clear input values when modal is opened.
Args:
is_open: open state of modal
Returns:
None for name, category, and price inputs and resets quantity slider to 1 and date to today's date
"""
return (None,None,None,1,date.today())
# Ref: https://dash.plotly.com/basic-callbacks
@app.callback(
Output('dash-category', 'options'),
[Input('table-item', 'data'),
Input('dash-monthyear', 'value')]
)
def set_cat_option(data, month_year):
"""Callback to set category dropdown options based on the month selected.
Args:
data: dataframe
month_year: selected date from dropdown
Returns:
list of categories into category dropdown from dataframe for selected date
"""
dff = pd.DataFrame.from_dict(data)
dff = dff.query('month_year == @month_year')
return [{'label': i, 'value': i} for i in sorted(dff.category.unique())]
@app.callback(
Output('dash-category', 'value'),
[Input('dash-category', 'options')]
)
def set_cat_default(available_options):
"""Callback to set category dropdown default value.
Args:
available_options: list of categories from dropdown
Returns:
first value from category dropdown to set as default dropdown value
"""
return available_options[0]['value']
# Ref: https://plotly.com/python/pie-charts/
@app.callback(
Output('graph-spending-all', 'figure'),
[Input('table-item', 'data'),
Input('dash-monthyear', 'value')]
)
def generate_graph_all_cat(data, month_year):
"""Callback to generate graph to show spending in all categories for the selected month.
Args:
data: dataframe
month_year: selected date from dropdown
Returns:
        pie chart displaying amounts spent per category and the total amount spent for the selected month
"""
dff = pd.DataFrame.from_dict(data)
dff = dff.query('month_year == @month_year')
dff_total = dff['total'].sum()
total_format = '{:.2f}'.format(dff_total)
fig = px.pie(dff,
values='total',
names='category',
title= 'Spending for All Categories in {}'.format(month_year),
hole= .5)
fig.update_traces(
hoverinfo='label+percent',
texttemplate='%{value:$.2f}',
textposition='inside'
)
fig.update_layout(
annotations= [
dict(text= 'Total Amount <br> ${}'.format(total_format), x=0.5, y=0.5, font_size=15, showarrow=False),
],
legend_title='<b> Category </b>'
)
return fig
# Ref: https://plotly.com/python/pie-charts/
@app.callback(
Output('graph-item', 'figure'),
[Input('table-item', 'data'),
Input('dash-monthyear', 'value'),
Input('dash-category', 'value')]
)
def update_graph_item(data, month_year, category):
"""Callback to generate graph to show amounts spent per item for the selected month and category.
Args:
data: dataframe
month_year: selected date from dropdown
category: selected category from dropdown
Returns:
        pie chart displaying amounts spent per item and the total amount spent for the selected month and category
"""
    dff = pd.DataFrame.from_dict(data)
# -*- coding: utf-8 -*-
import time
import pandas as pd
from .momentum import *
from .overlap import *
from .performance import *
from .statistics import *
from .trend import *
from .volatility import *
from .volume import *
from .utils import verify_series
from pandas.core.base import PandasObject
class BasePandasObject(PandasObject):
"""Simple PandasObject Extension
Ensures the DataFrame is not empty and has columns.
Args:
df (pd.DataFrame): Extends Pandas DataFrame
"""
def __init__(self, df, **kwargs):
if df.empty: return
if len(df.columns) > 0:
self._df = df
else:
raise AttributeError(f"[X] No columns!")
def __call__(self, kind, *args, **kwargs):
raise NotImplementedError()
@pd.api.extensions.register_dataframe_accessor('ta')
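# Note (sketch): the accessor class this decorator applies to is truncated from this
# excerpt. Once it is defined and the module imported, every DataFrame gains a `.ta`
# namespace, e.g.
#     df = pd.DataFrame({'close': [1.0, 1.2, 1.1]})
#     df.ta  # accessor instance wrapping df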
import os
import pandas as pd
from bs4 import BeautifulSoup
DATA_FOLDER = "../data/Energy_Price"
RESULT_FILENAME = "../data/sm_price/price_time.csv"
# Script to collect data in dataframes and save it in the data folder
def load_xml(data_file):
print(data_file)
with open(data_file, 'r') as src:
soup = BeautifulSoup(src, 'lxml')
return soup
def get_dataframes(data_files):
dframe = pd.DataFrame(columns=["lmp_value", "time"])
for data_file in data_files:
soup = load_xml(os.path.join(DATA_FOLDER, data_file))
data_list = []
for i in soup.findChildren("report_data"):
if i.data_item.text != "LMP_PRC":
continue
element_dict = {"lmp_value": i.value.text,
"time": i.interval_start_gmt.text
}
data_list.append(element_dict)
        dframe = dframe.append(pd.DataFrame(data_list))
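# Sketch of the assumed report XML (element names inferred from the parsing above,
# values illustrative):
#     <report_data>
#       <data_item>LMP_PRC</data_item>
#       <value>23.41</value>
#       <interval_start_gmt>2019-01-01T08:00:00-00:00</interval_start_gmt>
#     </report_data>
# Only LMP_PRC entries are kept, so dframe accumulates one (lmp_value, time) row per
# price record across all files in DATA_FOLDER.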
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 6 00:12:14 2021
@author: charlie.henry
"""
# https://data.austintexas.gov/resource/x44q-icha.csv?match_validity=valid
import pandas as pd
import numpy as np
from sodapy import Socrata
import geopandas
## Include your app token from socrata below
client = Socrata("data.austintexas.gov", "APP_TOKEN",timeout=360)
results = client.get("x44q-icha",where="(start_time like '2019%25' OR start_time like '2020%25' OR start_time like '2021%25') AND match_validity='valid'",limit=80000000)
data = pd.DataFrame.from_records(results)
# Creating date fields
data['datetime'] = pd.to_datetime(data['start_time'], format='%Y-%m-%dT%H:%M:%S')
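# Note (sketch): in the SoQL `where` clause above, %25 is the URL-encoded '%' wildcard,
# so only valid matches whose start_time begins with 2019, 2020 or 2021 are returned.
# A smaller exploratory pull might look like:
#     sample = client.get("x44q-icha", where="match_validity='valid'", limit=1000)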
import os
import pandas as pd
import numpy as np
import scipy.sparse as sp
from logging import getLogger
from libcity.utils import StandardScaler, NormalScaler, NoneScaler, \
MinMax01Scaler, MinMax11Scaler, LogScaler, ensure_dir
from libcity.data.dataset import AbstractDataset
class ChebConvDataset(AbstractDataset):
def __init__(self, config):
self.config = config
self.dataset = self.config.get('dataset', '')
self.cache_dataset = self.config.get('cache_dataset', True)
self.train_rate = self.config.get('train_rate', 0.7)
self.eval_rate = self.config.get('eval_rate', 0.1)
self.scaler_type = self.config.get('scaler', 'none')
        # Paths and related parameters
self.parameters_str = \
str(self.dataset) + '_' + str(self.train_rate) + '_' \
+ str(self.eval_rate) + '_' + str(self.scaler_type)
self.cache_file_name = os.path.join('./libcity/cache/dataset_cache/',
'road_rep_{}.npz'.format(self.parameters_str))
self.cache_file_folder = './libcity/cache/dataset_cache/'
ensure_dir(self.cache_file_folder)
self.data_path = './raw_data/' + self.dataset + '/'
if not os.path.exists(self.data_path):
raise ValueError("Dataset {} not exist! Please ensure the path "
"'./raw_data/{}/' exist!".format(self.dataset, self.dataset))
        # Load the dataset's config.json settings
self.geo_file = self.config.get('geo_file', self.dataset)
self.rel_file = self.config.get('rel_file', self.dataset)
        # Initialization
self.adj_mx = None
self.scaler = None
self.feature_dim = 0
self.num_nodes = 0
self._logger = getLogger()
self._load_geo()
self._load_rel()
def _load_geo(self):
"""
加载.geo文件,格式[geo_id, type, coordinates, properties(若干列)]
"""
geofile = pd.read_csv(self.data_path + self.geo_file + '.geo')
self.geo_ids = list(geofile['geo_id'])
self.num_nodes = len(self.geo_ids)
self.geo_to_ind = {}
for index, idx in enumerate(self.geo_ids):
self.geo_to_ind[idx] = index
self._logger.info("Loaded file " + self.geo_file + '.geo' + ', num_nodes=' + str(len(self.geo_ids)))
self.road_info = geofile
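    # Example .geo layout (hypothetical row): a header of geo_id,type,coordinates,...
    # followed by one row per road segment, e.g.
    #     0,LineString,"[[116.38,39.90],[116.39,39.91]]",...
    # geo_to_ind then maps each geo_id to its positional index, e.g. {0: 0, 1: 1, ...}.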
def _load_rel(self):
"""
加载.rel文件,格式[rel_id, type, origin_id, destination_id, properties(若干列)],
生成N*N的矩阵,默认.rel存在的边表示为1,不存在的边表示为0
Returns:
np.ndarray: self.adj_mx, N*N的邻接矩阵
"""
        map_info = pd.read_csv(self.data_path + self.rel_file + '.rel')
# coding: utf-8
"""tools for analyzing VPs in an individual precipitation event"""
from collections import OrderedDict
from os import path
from datetime import timedelta
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.io import loadmat
from radcomp.vertical import (filtering, classification, plotting, insitu, ml,
deriv, NAN_REPLACEMENT)
from radcomp import arm, azs
from radcomp.tools import strftime_date_range, cloudnet
from j24 import home, daterange2str
USE_LEGACY_DATA = False
if USE_LEGACY_DATA:
DATA_DIR = path.join(home(), 'DATA', 'vprhi')
DATA_FILE_FMT = '%Y%m%d_IKA_VP_from_RHI.mat'
else:
DATA_DIR = path.join(home(), 'DATA', 'vprhi2')
DATA_FILE_FMT = '%Y%m%d_IKA_vprhi.mat'
DEFAULT_PARAMS = ['zh', 'zdr', 'kdp']
def case_id_fmt(t_start, t_end=None, dtformat='{year}{month}{day}{hour}',
day_fmt='%d', month_fmt='%m', year_fmt='%y', hour_fmt='T%H'):
"""daterange2str wrapper for date range based IDs"""
return daterange2str(t_start, t_end, dtformat=dtformat, hour_fmt=hour_fmt,
day_fmt=day_fmt, month_fmt=month_fmt,
year_fmt=year_fmt)
def date_us_fmt(t_start, t_end, dtformat='{day} {month} {year}', day_fmt='%d',
month_fmt='%b', year_fmt='%Y'):
"""daterange2str wrapper for US human readable date range format"""
return daterange2str(t_start, t_end, dtformat=dtformat, day_fmt=day_fmt,
month_fmt=month_fmt, year_fmt=year_fmt)
def vprhimat2pn(datapath):
"""Read vertical profile mat files to Panel."""
# TODO: Panel
try:
data = loadmat(datapath)['VP_RHI']
except FileNotFoundError as e:
print('{}. Skipping.'.format(e))
return pd.Panel()
fields = list(data.dtype.fields)
fields.remove('ObsTime')
fields.remove('height')
str2dt = lambda tstr: pd.datetime.strptime(tstr, '%Y-%m-%dT%H:%M:%S')
t = list(map(str2dt, data['ObsTime'][0][0]))
h = data['height'][0][0][0]
data_dict = {}
for field in fields:
data_dict[field] = data[field][0][0].T
try:
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
# sometimes t does not have all values
except ValueError as e:
if data_dict['ZH'].shape[1] == 96:
# manouver to set correct timestamps when data missing
t1 = t[0] + timedelta(hours=23, minutes=45)
midnight = t1.replace(hour=0, minute=0)
if midnight <= t[0]:
midnight += timedelta(hours=24)
dt = t1-midnight
dt_extra = timedelta(minutes=15-(dt.total_seconds()/60)%15)
dt = dt + dt_extra
t = pd.date_range(t[0]-dt, t1-dt, freq='15min')
print('ObsTime missing values! Replacing with generated timestamps.')
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
else:
raise e
def downward_gradient(df):
"""smooth downwards gradient"""
df_smooth = df.fillna(0).apply(filtering.savgol_series, args=(19, 2))
dfg = df_smooth.diff()
dfg = dfg.rolling(5, center=True).mean() # smooth gradient
dfg[df.isnull()] = np.nan
return -dfg
def proc_indicator(pn, var='zdrg', tlims=(-20, -10)):
"""gradient to process indicator"""
return pn[var][(pn.T < tlims[1]) & (pn.T > tlims[0])].sum()
def kdp2phidp(kdp, dr_km):
"""Retrieve phidp from kdp."""
kdp_filled = kdp.fillna(0)
return 2*kdp_filled.cumsum().multiply(dr_km, axis=0)
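# Note (sketch): KDP is the range derivative of PHIDP/2, so PHIDP is recovered by
# integrating along range:
#     phidp(r_n) ~= 2 * sum_{k<=n} kdp(r_k) * dr_km(r_k)
# which is exactly the cumulative sum scaled by the gate spacing computed above.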
def data_range(dt_start, dt_end):
"""read raw VP data between datetimes"""
filepath_fmt = path.join(DATA_DIR, DATA_FILE_FMT)
fnames = strftime_date_range(dt_start, dt_end, filepath_fmt)
pns = map(vprhimat2pn, fnames)
pns_out = []
for pn in pns:
if not pn.empty:
pns_out.append(pn)
return pd.concat(pns_out, axis=2, sort=True).loc[:, :, dt_start:dt_end]
def prepare_pn(pn, kdpmax=np.nan):
"""Filter data and calculate extra parameters."""
dr = pd.Series(pn.major_axis.values, index=pn.major_axis).diff().bfill()
dr_km = dr/1000
pn_new = pn.copy()
pn_new['KDP_orig'] = pn_new['KDP'].copy()
#pn_new['KDP'][pn_new['KDP'] < 0] = np.nan
pn_new['phidp'] = kdp2phidp(pn_new['KDP'], dr_km)
kdp = pn_new['KDP'] # a view
# remove extreme KDP values in the panel using a view
if USE_LEGACY_DATA:
kdp[kdp > kdpmax] = 0
#kdp[kdp < 0] = 0
pn_new = filtering.fltr_median(pn_new)
pn_new = filtering.fltr_nonmet(pn_new)
# ensure all small case keys are in place
pn_new = filtering.create_filtered_fields_if_missing(pn_new, DEFAULT_PARAMS)
#pn_new = filtering.fltr_ground_clutter_median(pn_new)
pn_new['kdpg'] = 1000*downward_gradient(pn_new['kdp'])
pn_new['zdrg'] = downward_gradient(pn_new['zdr'])
return pn_new
def dt2pn(dt0, dt1, **kws):
"""Read and preprocess VP data between datetimes."""
pn_raw = data_range(dt0, dt1)
return prepare_pn(pn_raw, **kws)
def fillna(dat, field=''):
"""Fill nan values with values representing zero scatterers."""
data = dat.copy()
if isinstance(data, pd.Panel):
for field in list(data.items):
data[field].fillna(NAN_REPLACEMENT[field.upper()], inplace=True)
elif isinstance(data, pd.DataFrame):
data.fillna(NAN_REPLACEMENT[field.upper()], inplace=True)
return data
def prepare_data(pn, fields=DEFAULT_PARAMS, hlimits=(190, 10e3), kdpmax=None):
"""Prepare data for classification. Scaling has do be done separately."""
try:
data = pn[fields, hlimits[0]:hlimits[1], :].transpose(0, 2, 1)
except TypeError: # assume xarray dataset
pn = pn.to_dataframe().to_panel() # TODO: Panel
data = pn[fields, hlimits[0]:hlimits[1], :].transpose(0, 2, 1)
if kdpmax is not None:
data['KDP'][data['KDP'] > kdpmax] = np.nan
return fillna(data)
def prep_data(pn, vpc):
"""prepare_data wrapper"""
return prepare_data(pn, fields=vpc.params, hlimits=vpc.hlimits, kdpmax=vpc.kdpmax)
def round_time_index(data, resolution='1min'):
"""round datetime index to a given resolution"""
dat = data.copy()
ind = data.index.round(resolution)
dat.index = ind
return dat
def inversion_score(c):
"""Score indicating inversion"""
tdiff = c.data['T'].diff()
return tdiff[tdiff>0].sum().median()
def plot_case(c, params=None, interactive=True, raw=True, n_extra_ax=0,
t_contour_ax_ind=False, above_ml_only=False, t_levels=[0],
inverse_transformed=False, plot_extras=['ts', 'silh', 'cl', 'lwe'],
**kws):
"""Visualize a Case object."""
try:
c.load_model_temperature()
except (ValueError, FileNotFoundError):
pass
if not c.has_ml:
above_ml_only = False
if raw:
data = c.data
else:
data = c.cl_data.transpose(0, 2, 1)
if above_ml_only:
data = c.data_above_ml if raw else c.only_data_above_ml(data)
elif inverse_transformed:
if c.has_ml:
above_ml_only = True
data = c.inverse_transform()
if params is None:
if c.vpc is not None:
params = c.vpc.params
else:
params = DEFAULT_PARAMS
plot_classes = ('cl' in plot_extras) and (c.vpc is not None)
plot_silh = ('silh' in plot_extras) and (c.vpc is not None)
plot_lwe = ('lwe' in plot_extras) and (c.pluvio is not None)
if plot_lwe:
plot_lwe = not c.pluvio.data.empty
plot_azs = ('azs' in plot_extras) and (c.azs().size > 0)
plot_fr = ('fr' in plot_extras) and (c.fr().size > 0)
plot_t = ('ts' in plot_extras) and (c.t_surface().size > 0)
plot_lr = ('lr' in plot_extras)
n_extra_ax += plot_t + plot_lwe + plot_fr + plot_azs + plot_silh
next_free_ax = -n_extra_ax
cmap_override = {'LR': 'seismic', 'kdpg': 'bwr', 'zdrg': 'bwr',
'omega': 'seismic_r'}
if plot_lr:
data['LR'] = data['T'].diff()
params = np.append(params, 'LR')
hlims = (0, 11.5e3) if (c.has_ml and not above_ml_only) else (0, 10e3)
fig, axarr = plotting.plotpn(data, fields=params,
n_extra_ax=n_extra_ax, has_ml=c.has_ml,
cmap_override=cmap_override,
hlims=hlims, **kws)
plotfuns = OrderedDict()
plotfuns[c.plot_t] = plot_t
plotfuns[c.plot_silh] = plot_silh
plotfuns[c.plot_lwe] = plot_lwe
plotfuns[c.plot_azs] = plot_azs
plotfuns[c.plot_fr] = plot_fr
for plotfun, flag in plotfuns.items():
if flag:
plotfun(ax=axarr[next_free_ax])
next_free_ax += 1
# plot temperature contours
if t_contour_ax_ind:
if t_contour_ax_ind == 'all':
t_contour_ax_ind = range(len(params))
try:
for i in t_contour_ax_ind:
c.plot_growth_zones(ax=axarr[i], levels=t_levels)
except TypeError:
warnfmt = '{}: Could not plot temperature contours.'
print(warnfmt.format(c.name()))
if plot_classes:
for iax in range(len(axarr)-1):
c.vpc.class_colors(classes=c.classes(), ax=axarr[iax])
has_vpc = (c.vpc is not None)
if c.has_ml and has_vpc and not above_ml_only:
for i in range(len(params)):
c.plot_ml(ax=axarr[i])
c.cursor = mpl.widgets.MultiCursor(fig.canvas, axarr, color='black',
horizOn=True, vertOn=True, lw=0.5)
if interactive:
on_click_fun = lambda event: c._on_click_plot_dt_cs(event, params=params,
inverse_transformed=inverse_transformed,
above_ml_only=above_ml_only)
fig.canvas.mpl_connect('button_press_event', on_click_fun)
for ax in axarr:
ax.xaxis.grid(True)
ax.yaxis.grid(True)
c.set_xlim(ax)
axarr[0].set_title(date_us_fmt(c.t_start(), c.t_end()))
return fig, axarr
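# Example usage (sketch): given a Case object `c` with a classification scheme loaded,
#     fig, axarr = plot_case(c, t_contour_ax_ind='all', plot_extras=['ts', 'cl'])
# draws the profile panels plus surface temperature and class rows, and clicking a
# timestamp in the interactive figure opens the corresponding vertical profiles.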
class Case:
"""
Precipitation event class for VP studies.
Attributes:
data (Panel)
cl_data (Panel): non-scaled classifiable data
cl_data_scaled (Panel): scaled classifiable data
vpc (radcomp.vertical.VPC): classification scheme
temperature (Series): stored temperature
pluvio (baecc.instruments.Pluvio)
"""
def __init__(self, data=None, cl_data=None, cl_data_scaled=None,
vpc=None, has_ml=False, timedelta=None,
is_convective=None):
self.data = data
self.cl_data = cl_data
self.cl_data_scaled = cl_data_scaled
self.silh_score = None
self.vpc = vpc
self.pluvio = None
self.has_ml = has_ml
self.is_convective = is_convective
self._timedelta = timedelta
self._data_above_ml = None
self._dt_ax = None
self.cursor = None
self._classes = None
@classmethod
def from_dtrange(cls, t0, t1, **kws):
"""Create a case from data between a time range."""
kdpmax = 0.5
if 'has_ml' in kws:
if kws['has_ml']:
kdpmax = 1.3
pn = dt2pn(t0, t1, kdpmax=kdpmax)
return cls(data=pn, **kws)
@classmethod
def from_mat(cls, matfile, **kws):
"""Case object from a single mat file"""
pn = vprhimat2pn(matfile)
data = prepare_pn(pn)
return cls(data=data, **kws)
@classmethod
def from_xarray(cls, ds, **kws):
"""Case from xarray dataset"""
pn = ds.to_dataframe().to_panel()
#data = filtering.create_filtered_fields_if_missing(pn, DEFAULT_PARAMS)
data = prepare_pn(pn)
return cls(data=data, **kws)
@classmethod
def from_nc(cls, ncfile, **kws):
        # build a Case from a netCDF file via xarray
        ds = xr.open_dataset(ncfile)
        return cls.from_xarray(ds, **kws)
@property
def data_above_ml(self):
"""lazy loading data above ml"""
if not self.has_ml:
return self.data
if self._data_above_ml is None:
self._data_above_ml = self.only_data_above_ml()
return self._data_above_ml
@property
def timedelta(self):
"""time resolution"""
if self._timedelta is None:
dt = self.timestamps().diff().min()
notefmt = 'Case timedelta was not set. Setting to detected value of {}'
print(notefmt.format(dt))
self._timedelta = self.timestamps().diff().min()
return self._timedelta
@timedelta.setter
def timedelta(self, timedelta):
self._timedelta = timedelta
def dataset(self):
"""data Panel as xarray DataSet"""
mapping = dict(major_axis='height', minor_axis='time')
return self.data.to_xarray().to_dataset(dim='items').rename(mapping)
def only_data_above_ml(self, data=None):
"""Data above ml"""
if data is None:
data = self.data
data = fillna(data)
top = self.ml_limits()[1]
return data.apply(ml.collapse2top, axis=(2, 1), top=top)
def name(self, **kws):
"""date range based id"""
return case_id_fmt(self.t_start(), self.t_end(), **kws)
def t_start(self):
"""data start time"""
return self.data.minor_axis[0]
def t_end(self):
"""data end time"""
return self.data.minor_axis[-1]
def timestamps(self, fill_value=None, round_index=False):
"""Data timestamps as Series. Optionally filled with fill_value."""
t = self.data.minor_axis
data = t if fill_value is None else np.full(t.size, fill_value)
ts = pd.Series(index=t, data=data)
if round_index:
return round_time_index(ts)
return ts
def mask(self, raw=False):
"""common data mask"""
if raw:
return self.data['ZH'].isnull()
return self.data['zh'].isnull()
def load_classification(self, name=None, **kws):
"""Load a classification scheme based on its id, and classify."""
if name is None:
name = self.vpc.name()
self.vpc = classification.VPC.load(name)
self.classify(**kws)
def prepare_cl_data(self, save=True, force_no_crop=False):
"""Prepare unscaled classification data."""
if self.data is None:
return None
cl_data = prep_data(self.data, self.vpc)
if self.has_ml and not force_no_crop:
top = self.ml_limits()[1]
collapsefun = lambda df: ml.collapse2top(df.T, top=top).T
cl_data = cl_data.apply(collapsefun, axis=(1, 2))
cl_data = fillna(cl_data)
if cl_data.size == 0:
return None
if save and not force_no_crop:
self.cl_data = cl_data
return cl_data
def scale_cl_data(self, save=True, force_no_crop=False):
"""scaled version of classification data
time rounded to the nearest minute
"""
cl_data = self.prepare_cl_data(save=save, force_no_crop=force_no_crop)
if cl_data is None:
return None
#scaled = scale_data(cl_data).fillna(0)
scaled = self.vpc.feature_scaling(cl_data).fillna(0)
if save and not force_no_crop:
self.cl_data_scaled = scaled
return scaled
def ml_limits(self, interpolate=True):
"""ML top using peak detection"""
if self.vpc is None:
nans = self.timestamps(fill_value=np.nan)
return nans.copy(), nans.copy()
if 'MLI' not in self.data:
self.prepare_mli(save=True)
bot, top = ml.ml_limits(self.data['MLI'], self.data['RHO'])
if not interpolate:
return bot, top
return tuple(lim.interpolate().bfill().ffill() for lim in (bot, top))
def prepare_mli(self, save=True):
"""Prepare melting layer indicator."""
cl_data_scaled = self.scale_cl_data(force_no_crop=True)
zdr = cl_data_scaled['zdr'].T
try:
z = cl_data_scaled['zh'].T
except KeyError:
z = cl_data_scaled['ZH'].T
rho = self.data['RHO'].loc[z.index]
mli = ml.indicator(zdr, z, rho)
if save:
self.data['MLI'] = mli
return mli
def classify(self, vpc=None, save=True):
"""classify based on class_scheme"""
if vpc is not None:
self.vpc = vpc
if self.cl_data_scaled is None:
self.scale_cl_data()
classify_kws = {}
if 'temp_mean' in self.vpc.params_extra:
classify_kws['extra_df'] = self.t_surface()
if self.cl_data_scaled is not None and self.vpc is not None:
classes, silh = self.vpc.classify(self.cl_data_scaled, **classify_kws)
classes.name = 'class'
if save:
self.silh_score = silh.reindex(self.data.minor_axis)
self._classes = classes
return classes, silh
return None, None
def inverse_transform(self):
"""inverse transformed classification data"""
pn = self.vpc.inverse_data
pn.major_axis = self.cl_data_scaled.minor_axis
pn.minor_axis = self.data.minor_axis
return self.vpc.feature_scaling(pn, inverse=True)
def plot_classes(self):
"""plot_classes wrapper"""
return plotting.plot_classes(self.cl_data_scaled, self.classes())
def plot(self, **kws):
"""Visualize the case."""
return plot_case(self, **kws)
def plot_growth_zones(self, **kws):
"""plotting.plot_growth_zones wrapper"""
self.load_model_temperature()
plotting.plot_growth_zones(self.data['T'], **kws)
def plot_ml(self, linestyle='', marker='_', ax=None):
"""Plot melting layer highlighting interpolated parts."""
ax = ax or plt.gca()
common_kws = dict(linestyle=linestyle, marker=marker)
_, topi = self.ml_limits(interpolate=True)
_, top = self.ml_limits(interpolate=False)
ax.plot(topi.index, topi.values, color='gray', zorder=5, **common_kws)
ax.plot(top.index, top.values, color='black', zorder=6, **common_kws)
return ax
def shift(self, data):
"""shift data for plotting"""
half_dt = self.timedelta/2
return data.shift(freq=half_dt)
def plot_series(self, data, ax=None, **kws):
"""Plot time series correctly shifted."""
ax = ax or plt.gca()
dat = self.shift(data)
plotting.plot_data(dat, ax=ax, **kws)
self.set_xlim(ax)
return ax
def plot_t(self, ax, tmin=-25, tmax=10):
"""Plot surface temperature."""
self.plot_series(self.t_surface(), ax=ax)
ax.set_ylabel(plotting.LABELS['temp_mean'])
ax.set_ylim([tmin, tmax])
return ax
def plot_lwe(self, ax, rmax=4):
"""plot LWE"""
self.plot_series(self.lwe(), ax=ax, label=self.pluvio.name)
ax.set_ylim(bottom=0, top=rmax)
ax.set_ylabel(plotting.LABELS['intensity'])
return ax
def plot_fr(self, ax, frmin=-0.1, frmax=1):
"""Plot riming fraction."""
self.plot_series(self.fr(), ax=ax, label='FR')
ax.set_ylim(bottom=frmin, top=frmax)
ax.set_ylabel(plotting.LABELS[self.fr().name])
return ax
def plot_azs(self, ax, amin=10, amax=4000):
"""Plot prefactor of Z-S relation"""
a_zs = self.azs()
label = plotting.LABELS[a_zs.name]
self.plot_series(a_zs, ax=ax, label=label)
ax.set_ylabel(plotting.LABELS[a_zs.name])
ax.set_yscale('log')
ax.set_ylim(bottom=amin, top=amax)
ax.set_yticks([10, 100, 1000])
return ax
def plot_silh(self, ax=None):
"""Plot silhouette coefficient"""
self.plot_series(self.silh_score, ax=ax)
ax.set_ylabel('silhouette\ncoefficient')
ax.set_ylim(bottom=-1, top=1)
ax.set_yticks([-1, 0, 1])
return ax
def train(self, **kws):
"""Train a classification scheme with scaled classification data."""
if self.vpc.extra_weight:
extra_df = self.t_surface()
else:
extra_df = None
if self.cl_data_scaled is None:
self.scale_cl_data()
return self.vpc.train(data=self.cl_data_scaled,
extra_df=extra_df, **kws)
def _on_click_plot_dt_cs(self, event, params=None, **kws):
"""on click plot profiles at a timestamp"""
try:
dt = plotting.num2date(event.xdata)
except TypeError: # clicked outside axes
return
ax, update, axkws = plotting.handle_ax(self._dt_ax)
axkws.update(kws)
self._dt_ax = self.plot_data_at(dt, params=params, **axkws)
if update:
ax.get_figure().canvas.draw()
def nearest_datetime(self, dt, method='nearest', **kws):
"""Round datetime to nearest data timestamp."""
i = self.data.minor_axis.get_loc(dt, method=method, **kws)
return self.data.minor_axis[i]
def plot_data_at(self, dt, params=None, inverse_transformed=False,
above_ml_only=False, **kws):
"""Plot profiles at given timestamp."""
data_orig = self.data_above_ml if above_ml_only else self.data
# integer location
i = data_orig.minor_axis.get_loc(dt, method='nearest')
dti = data_orig.minor_axis[i]
data = data_orig.iloc[:, :, i]
if params is not None:
data = data[params]
axarr = plotting.plot_vps(data, has_ml=self.has_ml, **kws)
if inverse_transformed:
plotting.plot_vps(self.inverse_transform().iloc[:, :, i],
has_ml=self.has_ml, axarr=axarr)
if not above_ml_only and self.has_ml:
_, ml_top = self.ml_limits(interpolate=False)
_, ml_top_i = self.ml_limits(interpolate=True)
for ax in axarr:
ax.axhline(ml_top_i.loc[dti], color='gray')
ax.axhline(ml_top.loc[dti], color='black')
t = data_orig.minor_axis[i]
axarr[1].set_title(str(t))
return axarr
def set_xlim(self, ax):
start = self.t_start()-self.timedelta/2
end = self.t_end()+self.timedelta/2
ax.set_xlim(left=start, right=end)
return ax
def base_minute(self):
"""positive offset in minutes for profile measurements after each hour
"""
return self.data.minor_axis[0].round('1min').minute%15
def base_middle(self):
dt_minutes = round(self.timedelta.total_seconds()/60)
return self.base_minute()-dt_minutes/2
def time_weighted_mean(self, data, offset_half_delta=True):
dt = self.timedelta
if offset_half_delta:
base = self.base_middle()
offset = dt/2
else:
base = self.base_minute()
offset = 0
return insitu.time_weighted_mean(data, rule=dt, base=base, offset=offset)
def t_surface(self, use_arm=False, interp_gaps=True):
"""resampled ground temperature
Returns:
Series: resampled temperature
"""
t_end = self.t_end()+pd.Timedelta(minutes=15)
if use_arm:
t = arm.var_in_timerange(self.t_start(), t_end, var='temp_mean')
else:
hdfpath = path.join(home(), 'DATA', 't_fmi_14-17.h5')
if not path.exists(hdfpath):
return pd.Series()
t = pd.read_hdf(hdfpath, 'data')['TC'][self.t_start():t_end]
t.name = 'temp_mean'
tre = t.resample('15min', base=self.base_minute()).mean()
if interp_gaps:
tre = tre.interpolate()
return tre
def azs(self, **kws):
t_end = self.t_end()+pd.Timedelta(minutes=15)
data = azs.load_series()[self.t_start(): t_end]
if data.empty:
return pd.Series()
return data.resample('15min', base=self.base_minute()).mean()
def load_pluvio(self, **kws):
"""load_pluvio wrapper"""
self.pluvio = insitu.load_pluvio(start=self.t_start(),
end=self.t_end(), **kws)
def load_model_data(self, variable='temperature'):
"""Load interpolated model data."""
self.data[variable] = cloudnet.load_as_df(self.data.major_axis,
self.data.minor_axis,
variable=variable)
def load_model_temperature(self, overwrite=False):
"""Load interpolated model temperature if not already loaded."""
if 'T' in self.data and not overwrite:
return
t = cloudnet.load_as_df(self.data.major_axis, self.data.minor_axis,
variable='temperature') - 273.15
self.data['T'] = t
def lwe(self):
"""liquid water equivalent precipitation rate"""
if self.pluvio is None:
self.load_pluvio()
i = self.pluvio.intensity()
return self.time_weighted_mean(i, offset_half_delta=False)
def fr(self):
"""rime mass fraction"""
t_end = self.t_end()+ | pd.Timedelta(minutes=15) | pandas.Timedelta |
import os
import glob
import click
import pickle
import zipfile
import datetime
import pandas as pd
@click.command()
@click.option('--redmine_instance', help='Path to pickled Redmine API instance')
@click.option('--issue', help='Path to pickled Redmine issue')
@click.option('--work_dir', help='Path to Redmine issue work directory')
@click.option('--description', help='Path to pickled Redmine description')
def qiimecombine_redmine(redmine_instance, issue, work_dir, description):
# Unpickle Redmine objects
redmine_instance = pickle.load(open(redmine_instance, 'rb'))
issue = pickle.load(open(issue, 'rb'))
description = pickle.load(open(description, 'rb'))
"""
Description is expected to be in the following format.
level=taxlevel
column_header=column_sample # Allow searching on more than one column header by adding more lines
optional! startdate-enddate. If not provided, assume we want to search all.
For column_header=column_sample rows, using a ~ instead of an = will allow for searching for partial matches
instead of complete
So an example would be:
level=5
sample_type=meat
180401-190408
"""
# Parse description
try:
level, column_headers, column_contents, start_date, end_date, operators = parse_description(description)
except: # Don't try to be too specific here, this could blow up in many ways.
redmine_instance.issue.update(resource_id=issue.id,
status_id=4,
notes='Your description was not formatted correctly. See DOCS for a description '
'of how to format the description.')
return
tax_barplots = glob.glob('/mnt/nas2/processed_sequence_data/miseq_assemblies/*/qiime2/taxonomy_barplot.qzv')
# As it turns out, qiime2 qzv files are actually just zip files with a bunch of data/metadata.
# https://github.com/joey711/phyloseq/issues/830
# Getting at data seems to be easiest if we just unzip and read in the relevant csv files with pandas
for tax_barplot in tax_barplots:
# Check the date of the assembly to see if it's in our range.
date_string = tax_barplot.split('/')[-3]
run_date = string_to_year(date_string)
if start_date < run_date < end_date:
cmd = 'cp {} {}'.format(tax_barplot, work_dir)
os.system(cmd)
output_dir = os.path.join(work_dir, tax_barplot.split('/')[-3])
with zipfile.ZipFile(os.path.join(work_dir, 'taxonomy_barplot.qzv'), 'r') as zipomatic:
zipomatic.extractall(output_dir)
    # Now we should have folders for all of our QIIME2 runs (named with their run dates).
# Grab the csv file for level of interest for all of them.
dataframe_list = list()
csv_files = glob.glob(os.path.join(work_dir, '*', '*', 'data', 'level-{}.csv'.format(level)))
for csv_file in csv_files:
df = | pd.read_csv(csv_file) | pandas.read_csv |
'''
Author:
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
<NAME> <<EMAIL>>
'''
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import plotly.subplots as tls
import plotly.graph_objs as go
import plotly
from sklearn.feature_selection import mutual_info_regression
INPUT_FILE = "./dataSource/features_combined.csv"
## Features that must be non-null; rows missing any of these are dropped
NULL_FEATURES = ['country', 'Country_Region', 'entity', 'total_covid_19_tests']
## Add Features that you want to load from features_combined.csv
FILTER_FEATURES = ['Country_Region', 'total_covid_19_tests', 'Confirmed', 'pop2020',
'HDI Rank (2018)', 'inform_risk', 'inform_p2p_hazard_and_exposure_dimension',
'population_density', 'population_living_in_urban_areas',
'proportion_of_population_with_basic_handwashing_facilities_on_premises',
'people_using_at_least_basic_sanitation_services',
'inform_vulnerability', 'inform_health_conditions',
'inform_epidemic_vulnerability', 'mortality_rate_under_5',
'prevalence_of_undernourishment', 'inform_lack_of_coping_capacity',
'inform_access_to_healthcare', 'current_health_expenditure_per_capita',
'maternal_mortality_ratio']
# Plotting cluster to world map
def plot_clusters(df_k, title):
colorscale = [[0, 'blue'], [0.25, 'green'], [0.5, 'yellow'], [0.75, 'orange'], [1, 'red']]
map_data = [dict(type='choropleth',
locations=df_k['country_region'].astype(str),
z=df_k['cluster'].astype(int),
locationmode='country names',
colorscale=colorscale)]
final_map = dict(data=map_data,
layout_title_text="<b>" + title + "</b>")
plotly.offline.plot(final_map)
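# Example call (hypothetical DataFrame): plot_clusters(df_k, 'KMeans clusters, unscaled data')
# where df_k has a 'country_region' column of country names and an integer 'cluster' column.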
# Plotting all world maps together
def plot_multiple_maps(df_list, title=None):
## plot result
_colorscale = [[0, 'blue'], [0.25, 'green'], [0.5, 'yellow'], [0.75, 'orange'], [1, 'red']]
ROW, COL = 3, 1
if not title: title = 'Unscaled vs Scaled vs Scaled with Top Factors'
fig = tls.make_subplots(rows=ROW, cols=COL, column_widths=[1], row_heights=[0.33, 0.33, 0.33],
specs=[[{"type": "choropleth"}], [{"type": "choropleth"}], [{"type": "choropleth"}]])
for r in range(ROW):
for c in range(COL):
_df = df_list[c * ROW + r]
fig.add_trace(
go.Choropleth(type='choropleth',
locations=_df['country_region'].astype(str),
z=_df['cluster'].astype(int),
locationmode='country names',
showscale=True, colorscale=_colorscale,
colorbar=dict(
title="Cluster Index",
yanchor="top", x=-0.2, y=1,
ticks="outside", ticksuffix="(num)",
), ),
row=r + 1, col=c + 1
)
fig.update_layout(
title=title,
autosize=True,
width=1400,
height=900,
)
fig.show()
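# Example call (hypothetical): plot_multiple_maps([df_unscaled, df_scaled, df_top_factors])
# with three DataFrames shaped like df_k above; they are drawn as three stacked choropleths.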
dataset_feature = pd.read_csv(INPUT_FILE)
not_null_features_df = dataset_feature[dataset_feature[NULL_FEATURES].notnull().all(1)]
not_zero_total_tests_df = not_null_features_df.loc[not_null_features_df['total_covid_19_tests'] != 0]
dataset_features_by_country = not_zero_total_tests_df[FILTER_FEATURES]
dataset_features_by_country = dataset_features_by_country.fillna(0)  # fillna is not in-place; assign the result
dataset_features_by_country.loc[
dataset_features_by_country.Country_Region == 'US', 'Country_Region'] = 'United States of America'
temp_data = dataset_features_by_country.sort_values(by=["Country_Region"])
temp_data = temp_data.reset_index(drop=True)
temp_data['pop2020'] = temp_data['pop2020'].apply(lambda x: x * 1000)
temp_data["confirmed_ratio"] = temp_data["Confirmed"] / temp_data["pop2020"]
temp_data["confirmed_ratio"] = temp_data["confirmed_ratio"].apply(lambda x: x * 1000)
temp_data["test_ratio"] = temp_data["total_covid_19_tests"] / temp_data["pop2020"]
temp_data["test_ratio"] = temp_data["test_ratio"].apply(lambda x: x * 1000)
temp_data = temp_data.replace("No data", 0)
temp_data = temp_data.replace(np.inf, 0)
temp_data = temp_data.replace(np.nan, 0)
temp_data = temp_data.replace('x', 0)
print(temp_data)
# Plot confirmed cases on the world map
colorscale = [[0, 'blue'], [0.25, 'green'], [0.5, 'yellow'], [0.75, 'orange'], [1, 'red']]
world_map = [dict(type='choropleth',
locations=temp_data['Country_Region'].astype(str),
z=temp_data['Confirmed'].astype(int),
locationmode='country names',
colorscale=colorscale)]
final_map = dict(data=world_map,
layout_title_text="<b>Confirmed COVID-19 Cases</b>")
plotly.offline.plot(final_map)
indicator_data = temp_data.drop(columns=["Country_Region", "pop2020", "Confirmed", "total_covid_19_tests"])
print("DATA FOR CLUSTERING\n", indicator_data.tail(10))
print("\nfeatures:", indicator_data.columns)
# ------------------------------------------------------------------------------------------
# CLUSTER WITH UNSCALED DATA
# ------------------------------------------------------------------------------------------
data_unscaled = temp_data.drop(columns=["Country_Region", "pop2020", "Confirmed", "total_covid_19_tests"])
# Plot inertia to find the best number of clusters to use
inertia_elbow = []
for i in range(1, 15):
kmeans = KMeans(n_clusters=i, init='k-means++')
kmeans.fit(data_unscaled)
inertia_elbow.append(kmeans.inertia_)
plt.plot(range(1, 15), inertia_elbow)
plt.xlabel("# of clusters")
plt.ylabel("Inertia")
plt.title("Optimal clusters for UNSCALED data")
plt.show()
# Find the factor that impacts the confirmed ratio the most to visualize the clusters
mutual_info = mutual_info_regression(indicator_data.drop(columns=['confirmed_ratio']), indicator_data['confirmed_ratio'])
mutual_info = | pd.Series(mutual_info) | pandas.Series |
"""
Tests the relational_features module.
"""
import re
import unittest
import numpy as np
import pandas as pd
import mock
from .context import relational_features
from .context import config
from .context import util
from .context import test_utils as tu
class RelationalFeaturesTestCase(unittest.TestCase):
def setUp(self):
config_obj = tu.sample_config()
util_obj = util.Util()
self.test_obj = relational_features.RelationalFeatures(config_obj,
util_obj)
def tearDown(self):
self.test_obj = None
def test_init(self):
result = self.test_obj
self.assertTrue(isinstance(result.config_obj, config.Config))
self.assertTrue(isinstance(result.util_obj, util.Util))
@mock.patch('pandas.concat')
def test_build(self, mock_concat):
df = tu.sample_df(10)
self.test_obj.util_obj.start = mock.Mock()
self.test_obj.settings = mock.Mock(return_value=('bl', 'wl'))
self.test_obj.build_features = mock.Mock()
self.test_obj.build_features.side_effect = [('tr', '', 'td'),
('te', 'l', '')]
self.test_obj.strip_labels = mock.Mock(return_value='stripped')
mock_concat.return_value = df
self.test_obj.util_obj.end = mock.Mock()
result = self.test_obj.build('train_df', 'test_df', 'test', fw='fw')
exp_start = 'building relational features...'
exp_build = [mock.call('train_df', 'bl', 'wl'),
mock.call('stripped', 'bl', 'wl', 'td')]
self.assertTrue(list(result[0]) == ['com_id', 'random'])
self.assertTrue(len(result[0]) == 10)
self.assertTrue(result[1] == ['l'])
self.test_obj.util_obj.start.assert_called_with(exp_start, fw='fw')
self.test_obj.settings.assert_called()
self.test_obj.strip_labels.assert_called_with('test_df')
self.assertTrue(self.test_obj.build_features.call_args_list ==
exp_build)
mock_concat.assert_called_with(['tr', 'te'])
self.test_obj.util_obj.end.assert_called_with(fw='fw')
def test_settings(self):
result = self.test_obj.settings()
self.assertTrue(result == (3, 10))
def test_strip_labels_no_noisy_labels(self):
df = tu.sample_df(2)
df_copy = tu.sample_df(2)
df_copy.columns = ['com_id', 'label']
df.copy = mock.Mock(return_value=df_copy)
result = self.test_obj.strip_labels(df)
self.assertTrue(list(result['com_id']) == [0, 1])
self.assertTrue(np.isnan(result['label'].sum()))
def test_strip_labels_noisy_labels(self):
df = tu.sample_df(2)
df_copy = tu.sample_df(2)
df_copy.columns = ['com_id', 'label']
df_copy['noisy_labels'] = [6, 9]
df.copy = mock.Mock(return_value=df_copy)
result = self.test_obj.strip_labels(df)
self.assertTrue(list(result['com_id']) == [0, 1])
self.assertTrue(list(result['label']) == [6, 9])
def test_build_features(self):
feats = ('f', 'd', 'l')
self.test_obj.soundcloud = mock.Mock()
self.test_obj.youtube = mock.Mock(return_value=feats)
self.test_obj.twitter = mock.Mock()
self.test_obj.ifwe = mock.Mock()
self.test_obj.yelp_hotel = mock.Mock()
self.test_obj.yelp_restaurant = mock.Mock()
self.test_obj.config_obj.domain = 'youtube'
result = self.test_obj.build_features('df', 7, 8, train_dicts='td')
self.assertTrue(result == ('f', 'd', 'l'))
self.test_obj.soundcloud.assert_not_called()
self.test_obj.youtube.assert_called_with('df', 7, 8, 'td')
self.test_obj.twitter.assert_not_called()
self.test_obj.ifwe.assert_not_called()
self.test_obj.yelp_hotel.assert_not_called()
self.test_obj.yelp_restaurant.assert_not_called()
def test_soundcloud(self):
data = [[0, 1, 100, '', 'h', 0], [1, 2, 100, '', 't', 1],
[2, 1, 100, '', 'h', 0], [3, 2, 102, '', 'b', 1]]
df = pd.DataFrame(data, columns=['com_id', 'user_id', 'track_id',
'timestamp', 'text', 'label'])
self.test_obj.config_obj.pseudo = True
result = self.test_obj.soundcloud(df)
exp1 = pd.Series([0, 0, 1, 1])
exp2 = pd.Series([0, 0, 0, 1.0])
exp3 = | pd.Series([0.0, 0.0, 0.0, 0.0]) | pandas.Series |
from pandas import DataFrame
import pandas as pd
from sklearn.cross_decomposition import PLSRegression, PLSCanonical
def pls_wrapper(pls):
class PLSPandasMixin(pls):
def fit(self, x, y):
self.x = x
self.y = y
return super().fit(x, y)
def transform(self, x, y):
#assert all(x.index == self.x.index) and all(y.index == self.y.index)
T, U = super().transform(x, y)
return DataFrame(T, index=x.index), DataFrame(U, index=y.index)
@property
def W(self):
return DataFrame(self.x_weights_, index=self.x.columns)
@property
def C(self):
            return DataFrame(self.y_weights_, index=self.y.columns)  # y_weights_ rows correspond to the columns of y
@property
def P(self):
return DataFrame(self.x_loadings_, index=self.x.columns)
@property
def Q(self):
return DataFrame(self.y_loadings_, index=self.y.columns)
PLSPandasMixin.__name__ = 'Pandas' + pls.__name__
return PLSPandasMixin
PandasPLSRegression = pls_wrapper(PLSRegression)
PandasPLSCanonical = pls_wrapper(PLSCanonical)
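# Minimal usage sketch (hypothetical data; this helper is illustrative and not called
# anywhere): fit the wrapped PLS on feature/target DataFrames so that scores, weights
# and loadings keep their pandas row/column labels.
def _example_pls_usage(x: DataFrame, y: DataFrame, n_components: int = 2):
    pls = PandasPLSRegression(n_components=n_components)
    pls.fit(x, y)
    scores_x, scores_y = pls.transform(x, y)  # DataFrames indexed like x and y
    return scores_x, scores_y, pls.W, pls.Q   # weights/loadings indexed by column names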
def custom_scorer(func, greater_is_better=False):
if not greater_is_better:
return lambda e, x, y: -func(e, x, y)
return func
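# Illustrative use (hypothetical callable): wrapping an error-style metric with
# custom_scorer(err_func) flips its sign so that "greater is better", matching the
# scikit-learn scorer convention; pass greater_is_better=True to leave a score-style
# callable unchanged.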
def scores_by_component(scores_map):
return pd.concat([
pd.concat([
| DataFrame(scores[component]) | pandas.DataFrame |
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
from sqlalchemy import create_engine  # required by the to_sql calls below
#from werkzeug.utils import secure_filename
from pulp import *
from collections import defaultdict
from math import sin, cos, sqrt, atan2, radians
import scipy.optimize as optimize
import scipy.stats as st
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
# n, pint, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
y = spq['Product_Qty']
num_bins = 5
# n, dint, patches = plt.hist(y, num_bins, facecolor='blue', alpha=0.5)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
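        # The fitted demand model (coefficients above) is, in sketch form:
        #   quantity ~ intercept
        #              + diffpriceprodvscomp_param * (product price - competitor price)
        #              + promo1_param * promo1 + promo2_param * promo2
        #              + week_param * log(week index)
        # These parameters drive the demand and revenue surfaces computed below.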
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
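        # Optimisation sketch: choose a price path p_t (bounded by the form inputs)
        # that maximises total revenue sum(p_t * demand(p_t)), subject to the inventory
        # constraint sum(demand(p_t)) <= s_0 and non-negative prices. The objective is
        # negated because scipy.optimize.minimize minimises.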
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
    #to get distance between customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
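    # dist() is the haversine great-circle distance in km between two lat/long points,
    # scaled by the per-unit transport cost.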
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
    # Create the 'model' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity] + 5000000*cap_slack[cust] for cust in Demand)
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
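    # Model sketch: a capacitated facility-location MILP. The objective adds transport
    # cost (distance-based cost * shipped quantity), a fixed cost for every opened
    # factory, and a large penalty (5000000 per unit) on unmet demand captured by
    # cap_slack. The constraints force each customer's demand to be met (or absorbed by
    # slack) and keep shipments from a factory within its capacity, which is only
    # available if the factory is opened (factory_status is binary).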
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
    #if value=='weekly': ##weekly
    # return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
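        # Note: this MAPE variant divides by the predictions (y_pred); the conventional
        # definition divides by the actuals (y_true).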
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
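            # "Moving average" here means an ARIMA(0,0,1) (i.e. MA(1)) model fitted per
            # column, then predicted from the last training date to the requested horizon.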
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((As-bs)/As)*100))
                return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
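# Illustrative note: regression() fits each demand column against its integer
# time index (0, 1, 2, ...) and extrapolates that index noofterms steps ahead,
# so every forecast is a straight-line trend per column. A minimal sketch of
# the same idea on a toy series (values here are made up for illustration):
#   from sklearn import linear_model
#   toy = linear_model.LinearRegression().fit([[0], [1], [2]], [10, 12, 14])
#   toy.predict([[3], [4]])   # -> approximately [16, 18]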
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
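# Worked example (illustrative only): for series [10, 12, 13] with alpha=0.5
# and predictonterm=2, the smoothed in-sample values are [10, 11.0, 12.0] and
# the two forecast terms come out as [12.5, 12.25], since each new forecast
# blends the previous actual/forecast pair with the same alpha weight.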
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#index the quarterly data by Date and split into train/validation sets
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
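# Quick numeric check of the three error helpers (illustrative): with
# y_true=[100, 200] and y_pred=[110, 190], ME = mean(-10, 10) = 0,
# MAE = mean(10, 10) = 10, and MAPE is roughly 7.18% (note that this MAPE
# divides by y_pred rather than the more common y_true).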
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
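# df2 acts as the error summary grid: one row per metric (ME, MAE, MAPE) and
# one column per enabled technique; each model block below writes only its own
# column, and the frame is persisted to `summaryerror` at the end of the route.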
if mov==1:
#2---------------moving average (an MA(1) model, fitted via ARIMA(0,0,1))-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100)) # period-over-period increase, consistent with the other models
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#index the yearly data by Date and split into train/validation sets
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------moving average (an MA(1) model, fitted via ARIMA(0,0,1))-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100)) # period-over-period increase, consistent with the other models
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Attributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],alpha,predicterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],pred)
meanaverr=MAE(data[data.columns.tolist()[i]],pred)
mperr=MAPE(data[data.columns.tolist()[i]],pred)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = | pd.DataFrame(Xedata) | pandas.DataFrame |
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Worker():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
## Set Thread Safety for OSX
# os.system("./osx-thread.sh")
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
self.worker_type = worker_type
self.collection_start_time = None
self._task = None # task currently being worked on (dict)
self._child = None # process of currently running task (multiprocessing process)
self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes)
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Worker.ROOT_AUGUR_DIR
self.platform = platform
# count of tuples inserted in the database (to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# if we are finishing a previous task, certain operations work differently
self.finishing_task = False
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host'),
'gh_api_key': self.augur_config.get_value('Database', 'key'),
'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key'),
'offline_mode': False
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warning('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
self.config.update(config)
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
self.task_info = None
self.repo_id = None
self.owner = None
self.repo = None
self.given = given
self.models = models
self.debug_data = [] if 'debug_data' not in self.config else self.config['debug_data']
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
if self.config['offline_mode'] is False:
self.connect_to_broker()
try:
self.tool_source
self.tool_version
self.data_source
except:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
def __repr__(self):
return f"{self.config['id']}"
def write_debug_data(self, data, name):
if name in self.debug_data:
with open(f'{name}.json', 'w') as f:
json.dump(data, f)
def initialize_logging(self):
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
formatter = Formatter(fmt=format_string)
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(db_schema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
# Organize different api keys/oauths available
self.logger.info("Initializing API key.")
if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config:
self.init_oauths(self.platform)
else:
self.oauths = [{'oauth_id': 0}]
@property
def results_counter(self):
""" Property that is returned when the worker's current results_counter is referenced
"""
if self.worker_type == 'facade_worker':
return self.cfg.repos_processed #TODO: figure out why this doesn't work...
else:
return self._results_counter
@results_counter.setter
def results_counter(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
self._results_counter = value
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
# If the task has one of our "valid" job types
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
# Setting that causes paginating through ALL pages, not just unknown ones
# This setting is set by the housekeeper and is attached to the task before it gets sent here
if 'focused_task' in value:
if value['focused_task'] == 1:
self.logger.debug("Focused task is ON\n")
self.finishing_task = True
self._task = value
self.run()
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
# Spawn a subprocess to handle message reading and performing the tasks
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
self.initialize_logging() # need to initialize logging again in child process cause multiprocessing
self.logger.info("Starting data collection process\n")
self.initialize_database_connections()
while True:
if not self._queue.empty():
message = self._queue.get() # Get the task off our MP queue
else:
self.logger.info("No job found.")
break
self.logger.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
# If task is not a valid job type
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
pass
# Query repo_id corresponding to repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given'][self.given[0][0]]))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id)))
# Call method corresponding to model sent in task
try:
model_method = getattr(self, '{}_model'.format(message['models'][0]))
self.record_model_process(repo_id, message['models'][0]) # record the model actually being run
except Exception as e:
self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) +
'must have name of {}_model'.format(message['models'][0]))
self.register_task_failure(message, repo_id, e)
break
# Model method calls wrapped in try/except so that any unexpected error that occurs can be caught
# and worker can move onto the next task without stopping
try:
self.logger.info("Calling model method {}_model".format(message['models'][0]))
self.task_info = message
self.repo_id = repo_id
self.owner, self.repo = self.get_owner_repo(list(message['given'].values())[0])
model_method(message, repo_id)
except Exception as e: # this could be a custom exception, might make things easier
self.register_task_failure(message, repo_id, e)
break
self.logger.debug('Closing database connections\n')
self.db.dispose()
self.helper_db.dispose()
self.logger.info("Collection process finished")
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
type_dict[subject_columns[index]] = type(source[source_columns[index]].values[0])
subject = subject.astype(type_dict)
return subject, source
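# Illustrative usage (hypothetical frames and column names): align dtypes before
# a merge so that, e.g., an integer 'id' column in `subject` matches the dtype of
# 'gh_id' in `source`:
#   subject, source = self.sync_df_types(subject, source, ['id'], ['gh_id'])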
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
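# Example mappings (illustrative): "2020-01-01T00:00:00Z" -> TIMESTAMP,
# 42 -> BigInteger, 3.14 -> Float, 3.0 with column_name='issue_id' -> BigInteger,
# any other string -> String.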
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
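# Example (illustrative): a float column holding [1.0, 2.0, NaN] is whole-number
# valued, so it becomes the objects [1, 2, None]; a column like [1.5, 2.0] is
# left untouched.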
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
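# The temporary merge_data_{index}_{pid} tables exist only for the duration of
# the merge: they let Postgres perform the anti-join/update comparison instead
# of pandas, and they are dropped again in _close_postgres_merge().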
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixes?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
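# Illustrative behaviour: a record like {'user': {'id': 7, 'login': 'octo'}}
# requested with columns=['user.id'] gets its nested dict expanded into dotted
# columns ('user.id', 'user.login'), and only the requested/expanded columns are
# returned as plain records.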
def organize_needed_data(
self, new_data, table_values, table_pkey, action_map={}, in_memory=True
):
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
if not in_memory:
new_data_columns = action_map['insert']['source']
table_value_columns = action_map['insert']['augur']
if 'update' in action_map:
new_data_columns += action_map['update']['source']
table_value_columns += action_map['update']['augur']
(new_data_table, table_values_table), metadata, session = self._setup_postgres_merge(
[
self._get_data_set_columns(new_data, new_data_columns),
self._get_data_set_columns(table_values, table_value_columns)
]
)
need_insertion = pd.DataFrame(session.query(new_data_table).join(table_values_table,
eval(
' and '.join([
f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" \
for table_column, source_column in zip(action_map['insert']['augur'],
action_map['insert']['source'])
])
), isouter=True).filter(
table_values_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=table_value_columns)
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=table_value_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(session.query(new_data_table).join(table_values_table,
s.and_(
eval(' and '.join([f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['insert']['augur'], action_map['insert']['source'])])),
eval(' and '.join([f"table_values_table.c.{table_column} != new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['update']['augur'], action_map['update']['source'])]))
) ).all(), columns=table_value_columns)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
new_data_df = pd.DataFrame(new_data)
need_insertion, new_data_df = self.sync_df_types(
need_insertion, new_data_df, table_value_columns, new_data_columns
)
need_insertion = need_insertion.merge(
new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n")
else:
table_values_df = pd.DataFrame(table_values, columns=table_values[0].keys())
new_data_df = pd.DataFrame(new_data).dropna(subset=action_map['insert']['source'])
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['insert']['source'], action_map['insert']['augur'])
need_insertion = new_data_df.merge(table_values_df, suffixes=('','_table'),
how='outer', indicator=True, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur']).loc[lambda x : x['_merge']=='left_only']
if 'update' in action_map:
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['update']['source'], action_map['update']['augur'])
partitions = math.ceil(len(new_data_df) / 1000)
attempts = 0
while attempts < 50:
try:
need_updates = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions\n")
for sub_df in numpy.array_split(new_data_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
need_updates = pd.concat([ need_updates, sub_df.merge(table_values_df, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur'], suffixes=('','_table'), how='inner',
indicator=False).merge(table_values_df, left_on=action_map['update']['source'],
right_on=action_map['update']['augur'], suffixes=('','_table'), how='outer',
indicator=True).loc[lambda x : x['_merge']=='left_only'] ])
self.logger.info(f"need_updates merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"need_updates df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
self.logger.info("Max need_updates merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
need_updates = need_updates.drop([column for column in list(need_updates.columns) if \
column not in action_map['update']['augur'] and column not in action_map['insert']['augur']],
axis='columns')
for column in action_map['insert']['augur']:
need_updates[f'b_{column}'] = need_updates[column]
need_updates = need_updates.drop([column for column in action_map['insert']['augur']], axis='columns')
return need_insertion.to_dict('records'), need_updates.to_dict('records')
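# Shape of the action_map argument (illustrative values, not tied to a specific
# table):
#   {'insert': {'source': ['id'], 'augur': ['gh_issue_id']},
#    'update': {'source': ['updated_at'], 'augur': ['updated_at']}}
# The 'insert' mapping drives duplicate detection; the 'update' mapping decides
# which existing rows need refreshing.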
def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}):
""" DEPRECATED
Include an extra key-value pair on each element of new_data that represents
the action that should be taken with this element (i.e. 'need_insertion')
:param new_data: List of dictionaries, data to be assigned an action to
:param table_values: Pandas DataFrame, existing data in the database to check
what action should be taken on the new_data depending on the presence of
each element in this DataFrame
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param table_pkey: String, the field name of the primary key of the table in
the database that we are checking the table_values for.
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, contains all the same elements of new_data, except
each element now has an extra key-value pair with the key being 'flag', and
the value being 'need_insertion', 'need_update', or 'none'
"""
need_insertion_count = 0
need_update_count = 0
if type(table_values) == list:
if len(table_values) > 0:
table_values = pd.DataFrame(table_values, columns=table_values[0].keys())
else:
table_values = pd.DataFrame(table_values)
for i, obj in enumerate(new_data):
if type(obj) != dict:
new_data[i] = {'flag': 'none'}
continue
obj['flag'] = 'none' # default of no action needed
existing_tuple = None
for db_dupe_key in list(duplicate_col_map.keys()):
if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any():
if table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'):
existing_tuple = table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0]
continue
obj['flag'] = 'need_insertion'
need_insertion_count += 1
break
if obj['flag'] == 'need_insertion':
continue
if not existing_tuple:
self.logger.info('An existing tuple was not found for this data ' +
'point and we have reached the check-updates portion of assigning ' +
'tuple action, so we will now move to next data point\n')
continue
# If we need to check the values of the existing tuple to determine if an update is needed
for augur_col, value_check in value_update_col_map.items():
not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True
if existing_tuple[augur_col] != value_check and not_nan_check:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
if obj['flag'] == 'need_update':
self.logger.info('Already determined that current tuple needs update, skipping checking further updates. '
'Moving to next tuple.\n')
continue
# Now check the existing tuple's values against the response values to determine if an update is needed
for col in update_col_map.keys():
if update_col_map[col] not in obj:
continue
if obj[update_col_map[col]] == existing_tuple[col]:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(col))
obj['flag'] = 'need_update'
self.logger.info(existing_tuple)
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
"was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
return new_data
def check_duplicates(self, new_data, table_values, key):
""" Filters what items of the new_data json (list of dictionaries) that are not
present in the table_values df
:param new_data: List of dictionaries, new data to filter duplicates out of
:param table_values: Pandas DataFrame, existing data to check what data is already
present in the database
:param key: String, key of each dict in new_data whose value we are checking
duplicates with
:return: List of dictionaries, contains elements of new_data that are not already
present in the database
"""
need_insertion = []
for obj in new_data:
if type(obj) != dict:
continue
if not table_values.isin([obj[key]]).any().any():
need_insertion.append(obj)
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
"was reduced to {} tuples.\n".format(str(len(need_insertion))))
return need_insertion
def connect_to_broker(self):
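""" Registers this worker with the Augur broker by POSTing self.specs to the
broker's /api/unstable/workers endpoint, retrying up to 5 times (sleeping 10
seconds between attempts) and exiting the process if no attempt succeeds.
"""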
connected = False
for i in range(5):
try:
self.logger.debug("Connecting to broker, attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['host_broker'],self.config['port_broker']), json=self.specs)
self.logger.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
self.logger.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
@staticmethod
def dump_queue(queue):
""" Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except IndexError:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except (TimeoutError, requests.exceptions.Timeout) as e:
self.logger.info("Request timed out. Sleeping 30 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
if platform == 'github':
cntrb = {
'cntrb_login': contributor['login'] if 'login' in contributor else None,
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': contributor['login'] if 'login' in contributor else None,
'gh_url': contributor['url'] if 'url' in contributor else None,
'gh_html_url': contributor['html_url'] if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': contributor['avatar_url'] if 'avatar_url' in contributor else None,
'gh_gravatar_id': contributor['gravatar_id'] if 'gravatar_id' in contributor else None,
'gh_followers_url': contributor['followers_url'] if 'followers_url' in contributor else None,
'gh_following_url': contributor['following_url'] if 'following_url' in contributor else None,
'gh_gists_url': contributor['gists_url'] if 'gists_url' in contributor else None,
'gh_starred_url': contributor['starred_url'] if 'starred_url' in contributor else None,
'gh_subscriptions_url': contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None,
'gh_organizations_url': contributor['organizations_url'] if 'organizations_url' in contributor else None,
'gh_repos_url': contributor['repos_url'] if 'repos_url' in contributor else None,
'gh_events_url': contributor['events_url'] if 'events_url' in contributor else None,
'gh_received_events_url': contributor['received_events_url'] if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': None,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
return self.find_id_from_login(login, platform)
def get_owner_repo(self, git_url):
""" Gets the owner and repository names of a repository from a git url
:param git_url: String, the git url of a repository
:return: Tuple, includes the owner and repository names in that order
"""
split = git_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' == repo[-4:]:
repo = repo[:-4]
return owner, repo
def get_max_id(self, table, column, default=25150, operations_table=False):
""" Gets the max value (usually used for id/pk's) of any Integer column
of any table
:param table: String, the table that consists of the column you want to
query a max value for
:param column: String, the column that you want to query the max value for
:param default: Integer, if there are no values in the
specified column, the value of this parameter will be returned
:param operations_table: Boolean, if True, this signifies that the table/column
that is wanted to be queried is in the augur_operations schema rather than
the augur_data schema. Default False
:return: Integer, the max value of the specified column/table
"""
maxIdSQL = s.sql.text("""
SELECT max({0}.{1}) AS {1}
FROM {0}
""".format(table, column))
db = self.db if not operations_table else self.helper_db
rs = pd.read_sql(maxIdSQL, db, params={})
if rs.iloc[0][column] is not None:
max_id = int(rs.iloc[0][column]) + 1
self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id))
else:
max_id = default
self.logger.warning("Could not find max id for {} column in the {} table... " +
"using default set to: {}\n".format(column, table, max_id))
return max_id
def get_table_values(self, cols, tables, where_clause=""):
""" Can query all values of any column(s) from any table(s)
with an optional where clause
:param cols: List of Strings, column(s) that user wants to query
:param tables: List of Strings, table(s) that user wants to query
:param where_clause: String, optional where clause to filter the values
queried
:return: Pandas DataFrame, contains all values queried in the columns, tables, and
optional where clause provided
"""
# Build comma-separated column and table lists without mutating the caller's arguments
table_str = ", ".join(tables)
col_str = ", ".join(cols)
table_values_sql = s.sql.text("""
SELECT {} FROM {} {}
""".format(col_str, table_str, where_clause))
self.logger.info("Getting table values with the following PSQL query: \n{}\n".format(
table_values_sql))
values = pd.read_sql(table_values_sql, self.db, params={})
return values
def init_oauths(self, platform='github'):
self.oauths = []
self.headers = None
self.logger.info("Trying initialization.")
# Make a list of api key in the config combined w keys stored in the database
# Select endpoint to hit solely to retrieve rate limit
# information from headers of the response
# Adjust header keys needed to fetch rate limit information from the API responses
if platform == 'github':
url = "https://api.github.com/users/gabe-heim"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github'
""".format(self.config['gh_api_key']))
key_name = 'gh_api_key'
rate_limit_header_key = "X-RateLimit-Remaining"
rate_limit_reset_header_key = "X-RateLimit-Reset"
elif platform == 'gitlab':
url = "https://gitlab.com/api/v4/version"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab'
""".format(self.config['gitlab_api_key']))
key_name = 'gitlab_api_key'
rate_limit_header_key = 'ratelimit-remaining'
rate_limit_reset_header_key = 'ratelimit-reset'
for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(
pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")
):
if platform == 'github':
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers[rate_limit_header_key]),
'seconds_to_reset': (
datetime.datetime.fromtimestamp(
int(response.headers[rate_limit_reset_header_key])
) - datetime.datetime.now()
).total_seconds()
})
self.logger.debug("Found OAuth available for use: {}".format(self.oauths[-1]))
if len(self.oauths) == 0:
self.logger.info(
"No API keys detected, please include one in your config or in the "
"worker_oauths table in the augur_operations schema of your database."
)
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
if platform == 'github':
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']}
self.logger.info("OAuth initialized\n")
def bulk_insert(
self, table, insert=[], update=[], unique_columns=[], update_columns=[],
max_attempts=3, attempt_delay=3, increment_counter=True, convert_float_int=False
):
""" Performs bulk inserts/updates of the given data to the given table
:param table: SQLAlchemy Table object for the table whose rows we are inserting/updating
(its name is also used to build the `self.<table>_table` bind-parameter expression below)
:param insert: List of dicts, data points to insert
:param update: List of dicts, data points to update, only needs key/value
pairs of the update_columns and the unique_columns
:param unique_columns: List of strings, column names that would uniquely identify any
given data point
:param update_columns: List of strings, names of columns that are being updated
:param max_attempts: Integer, number of attempts to perform on inserting/updating
before moving on
:param attempt_delay: Integer, number of seconds to wait in between attempts
:param increment_counter: Boolean, if True, add the number of affected rows to this
worker's insert/update counters
:returns: SQLAlchemy database execution response object(s), contains metadata
about number of rows inserted etc. This data is not often used.
"""
self.logger.info(
f"{len(insert)} insertions are needed and {len(update)} "
f"updates are needed for {table}"
)
update_result = None
insert_result = None
if len(update) > 0:
attempts = 0
update_start_time = time.time()
while attempts < max_attempts:
try:
update_result = self.db.execute(
table.update().where(
eval(
' and '.join(
[
f"self.{table}_table.c.{key} == bindparam('b_{key}')"
for key in unique_columns
]
)
)
).values(
{key: key for key in update_columns}
),
update
)
if increment_counter:
self.update_counter += update_result.rowcount
self.logger.info(
f"Updated {update_result.rowcount} rows in "
f"{time.time() - update_start_time} seconds"
)
break
except Exception as e:
self.logger.info(f"Warning! Error bulk updating data: {e}")
time.sleep(attempt_delay)
attempts += 1
if len(insert) > 0:
insert_start_time = time.time()
def psql_insert_copy(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
df = pd.DataFrame(insert)